Merge remote-tracking branch 'remotes/origin/v2.3.0'

# Conflicts:
#	README.md
#	RELEASE_NOTES.md
#	build.properties
#	infrastructure-provisioning/src/general/files/aws/deeplearning_description.json
#	infrastructure-provisioning/src/general/files/azure/deeplearning_description.json
#	infrastructure-provisioning/src/general/files/gcp/deeplearning_description.json
#	infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
#	infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
#	infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
#	infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
#	infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
#	infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
#	infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
#	infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate_gcp_resources.py
#	infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
#	infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
#	infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
#	infrastructure-provisioning/terraform/aws/computational_resources/main/main.tf
#	infrastructure-provisioning/terraform/aws/computational_resources/main/variables.tf
#	infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
#	infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
#	infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
#	infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
#	infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
#	infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
#	infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
#	infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
#	infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
#	infrastructure-provisioning/terraform/aws/endpoint/main/variables.tf
#	infrastructure-provisioning/terraform/aws/project/main/iam.tf
#	infrastructure-provisioning/terraform/aws/project/main/instance.tf
#	infrastructure-provisioning/terraform/aws/project/main/network.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-billing.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/configmap-ui-conf.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/deployment.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/service.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/values.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/configure_keycloak.sh
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/keycloak_values.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mongo_values.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mysql_keycloak_values.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/nginx_values.yaml
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/keycloak.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/main.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mongo.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mysql.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/nginx.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/secrets.tf
#	infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/variables.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/masters-user-data.sh
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/ssn-policy.json.tpl
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/workers-user-data.sh
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/main.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/variables.tf
#	infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
#	infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
#	infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
#	infrastructure-provisioning/terraform/azure/project/main/instance.tf
#	infrastructure-provisioning/terraform/bin/deploy/__init__.py
#	infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
#	infrastructure-provisioning/terraform/bin/deploy/provisioning.yml
#	infrastructure-provisioning/terraform/bin/deploy/supervisor_svc.conf
#	infrastructure-provisioning/terraform/bin/dlab.py
#	infrastructure-provisioning/terraform/bin/terraform-cli.py
#	infrastructure-provisioning/terraform/gcp/endpoint/main/iam.tf
#	infrastructure-provisioning/terraform/gcp/endpoint/main/instance.tf
#	infrastructure-provisioning/terraform/gcp/endpoint/main/main.tf
#	infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
#	infrastructure-provisioning/terraform/gcp/endpoint/main/variables.tf
#	infrastructure-provisioning/terraform/gcp/endpoint/provisioning.py
#	infrastructure-provisioning/terraform/gcp/endpoint/provisioning.yml
#	infrastructure-provisioning/terraform/gcp/main/main.tf
#	infrastructure-provisioning/terraform/gcp/main/variables.tf
#	infrastructure-provisioning/terraform/gcp/modules/common/iam.tf
#	infrastructure-provisioning/terraform/gcp/modules/common/network.tf
#	infrastructure-provisioning/terraform/gcp/modules/common/variables.tf
#	infrastructure-provisioning/terraform/gcp/modules/data_engine/instance.tf
#	infrastructure-provisioning/terraform/gcp/modules/data_engine/variables.tf
#	infrastructure-provisioning/terraform/gcp/modules/dataproc/instance.tf
#	infrastructure-provisioning/terraform/gcp/modules/dataproc/variables.tf
#	infrastructure-provisioning/terraform/gcp/modules/notebook/instance.tf
#	infrastructure-provisioning/terraform/gcp/modules/notebook/variables.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/main.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/buckets.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/iam.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-billing.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/configmap-ui-conf.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/deployment.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/ingress.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/values.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/configure_keycloak.sh
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/keycloak_values.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mongo_values.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mysql_values.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/nginx_values.yaml
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/keycloak.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/main.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mongo.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mysql.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/outputs.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/secrets.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/variables.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/outputs.tf
#	infrastructure-provisioning/terraform/gcp/ssn-gke/main/variables.tf
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/css/login.css
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/favicon.ico
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-arrow-down.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-sign.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-arrow-down.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-sign.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-arrow-down.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-sign.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/keycloak-logo.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-icons.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/logo.png
#	infrastructure-provisioning/terraform/keycloak-theme/dlab/login/theme.properties
#	integration-tests-cucumber/pom.xml
#	integration-tests-cucumber/src/main/java/org/apache/dlab/dto/EndpointDTO.java
#	integration-tests-cucumber/src/main/java/org/apache/dlab/mongo/MongoDBHelper.java
#	integration-tests-cucumber/src/main/java/org/apache/dlab/util/JacksonMapper.java
#	integration-tests-cucumber/src/main/java/org/apache/dlab/util/PropertyHelper.java
#	integration-tests-cucumber/src/test/java/dlab/Constants.java
#	integration-tests-cucumber/src/test/java/dlab/RunCucumberTest.java
#	integration-tests-cucumber/src/test/java/dlab/endpoint/EndpointSteps.java
#	integration-tests-cucumber/src/test/java/dlab/login/LoginSteps.java
#	integration-tests-cucumber/src/test/resources/config.properties
#	integration-tests-cucumber/src/test/resources/dlab/endpoint.feature
#	integration-tests-cucumber/src/test/resources/dlab/login.feature
#	integration-tests/examples/scenario_deeplearning/deeplearning_tests.py
#	integration-tests/examples/test_templates/deeplearning/conv.prototxt
#	integration-tests/pom.xml
#	integration-tests/src/test/java/com/epam/dlab/automation/test/TestServices.java
#	services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureBillableResourcesService.java
#	services/billing-azure/src/main/java/com/epam/dlab/billing/azure/model/AzureDailyResourceInvoice.java
#	services/billing-gcp/billing.yml
#	services/billing-gcp/src/main/resources/application.yml
#	services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java
#	services/self-service/entrypoint_aws.sh
#	services/self-service/entrypoint_gcp.sh
#	services/self-service/src/main/java/com/epam/dlab/backendapi/SelfServiceApplication.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDao.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDaoImpl.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsBillingDAO.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BaseShape.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineServiceShape.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineShape.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointShape.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryShape.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectManagingDTO.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/domain/SsnShape.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/service/ShapeFormat.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsBillingService.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
#	services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
#	services/self-service/src/main/resources/webapp/browserslist
#	services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
#	services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
#	services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
#	services/self-service/src/main/resources/webapp/src/app/resources/resources.component.ts
#	services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsBillingServiceTest.java
#	services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
#	services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
diff --git a/README.md b/README.md
index a175e2d..0d9202b 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,8 @@
 
         [Preparing environment for DLab deployment](#Env_for_DLab)
 
+        [Keycloak server](#Keycloak_server)
+
         [Self-Service Node](#Self_Service_Node)
 
         [Edge Node](#Edge_Node)
@@ -66,8 +68,11 @@
 ---------------
 # What is DLAB? <a name="What_is_DLAB"></a>
 
-DLab is an essential toolset for analytics. It is a self-service Web Console, used to create and manage exploratory environments. It allows teams to spin up analytical environments with best of breed open-source tools just with a single click of the mouse. Once established, environment can be managed by an analytical team itself, leveraging simple and easy-to-use Web Interface.
-<p>See more at <a href="http://dlab.opensource.epam.com/" rel="nofollow">dlab.opensource.epam.com</a>.</p>
+DLab is an essential toolset for analytics. It is a self-service Web Console used to create and manage exploratory 
+environments. It allows teams to spin up analytical environments with best-of-breed open-source tools with just a 
+single click of the mouse. Once established, the environment can be managed by the analytical team itself, leveraging 
+a simple and easy-to-use Web Interface.
+<p>See more at <a href="https://dlab.apache.org/" rel="nofollow">dlab.apache.org</a>.</p>
 
 ----------------------------
 # Logical architecture <a name="Logical_architecture"></a>
@@ -76,19 +81,28 @@
 
 ![Logical architecture](doc/logical_architecture.png)
 
-The diagram shows main components of DLab, which is a self-service for the infrastructure deployment and interaction with it. The purpose of each component is described below.
+The diagram shows the main components of DLab, which is a self-service for infrastructure deployment and interaction 
+with it. The purpose of each component is described below.
 
 ## Self-Service
 
-Self-Service is a service, which provides RESTful user API with Web User Interface for data scientist. It tightly interacts with Provisioning Service and Database. Self-Service delegates all user\`s requests to Provisioning Service. After execution of certain request from Self-service, Provisioning Service returns response about corresponding action happened with particular resource. Self-service, then, saves this response into Database. So, each time Self-Service receives request about status of provisioned infrastructure resources – it loads it from Database and propagates to Web UI.
+Self-Service is a service that provides a RESTful user API with a Web User Interface for data scientists. It tightly 
+interacts with the Provisioning Service and Database. Self-Service delegates all user's requests to the Provisioning 
+Service. After executing a request from Self-Service, the Provisioning Service returns a response about the 
+corresponding action performed on the particular resource. Self-Service then saves this response into the Database. 
+So, each time Self-Service receives a request about the status of provisioned infrastructure resources, it loads it 
+from the Database and propagates it to the Web UI.
 
 ## Billing
 
-Billing is a module, which provides a loading of the billing report for the environment to the database. It can be running as part of the Self-Service or a separate process.
+Billing is a module that loads the billing report for the environment into the database. It can run as part of the 
+Self-Service or as a separate process.
 
 ## Provisioning Service
 
-The Provisioning Service is a RESTful service, which provides APIs for provisioning of the user’s infrastructure. Provisioning Service receives the request from Self-Service, afterwards it forms and sends a command to the docker to execute requested action. Docker executes the command and generates a response.json file. Provisioning service analyzes response.json and responds to initial request of Self-Service, providing status-related information of the instance.
+The Provisioning Service is a RESTful service that provides APIs for provisioning the user's infrastructure. The 
+Provisioning Service receives a request from Self-Service, then forms and sends a command to Docker to execute the 
+requested action. Docker executes the command and generates a response.json file. The Provisioning Service analyzes 
+response.json and responds to the initial request of Self-Service, providing status-related information about the instance.
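+
+For illustration only, a response.json produced this way might carry fields like the hypothetical sketch below (the 
+field names are assumptions for clarity, not the exact schema used by the provisioning scripts):
+
+```
+{
+  "status": "ok",
+  "response": {
+    "result": {
+      "hostname": "ip-10-0-1-15.ec2.internal",
+      "ip_address": "10.0.1.15"
+    },
+    "log": "/var/log/dlab/notebook/notebook_create.log"
+  }
+}
+```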
 
 ## Security service
 
@@ -105,17 +119,24 @@
 -----------------------------
 # Physical architecture <a name="Physical_architecture"></a>
 
-The following diagrams demonstrate high-level physical architecture of DLab in AWS, Azure and GCP.
+The following diagrams demonstrate the high-level physical architecture of DLab in AWS, GCP and Azure.
 
-![Physical architecture](doc/physical_architecture.png)
+Diagram of DLab physical architecture on AWS:
 
-![Physical architecture](doc/azure_dlab_arch.png)
+![Physical architecture](doc/dlab_aws.png)
 
-![Physical architecture](doc/gcp_dlab.png)
+Diagram of DLab physical architecture on GCP:
+
+![Physical architecture](doc/dlab_gcp.png)
+
+Diagram of DLab physical architecture on Azure:
+
+![Physical architecture](doc/dlab_azure.png)
 
 ## Main components
 
 -   Self-service node (SSN)
+-   Endpoint node
 -   Edge node
 -   Notebook node (Jupyter, Rstudio, etc.)
 -   Data engine cluster
@@ -125,37 +146,52 @@
 
 Creation of self-service node – is the first step for deploying DLab. SSN is a main server with following pre-installed services:
 
--   DLab Web UI – is Web user interface for managing/deploying all components of DLab. It is accessible by the following URL: http[s]://SSN\_Public\_IP\_or\_Public\_DNS
--   MongoDB – is a database, which contains part of DLab’s configuration, user’s exploratory environments description as well as user’s preferences.
+-   DLab Web UI – a Web user interface for managing/deploying all components of DLab. It is accessible at the 
+    following URL: http[s]://SSN\_Public\_IP\_or\_Public\_DNS
+-   MongoDB – a database which contains part of DLab’s configuration, users’ exploratory environment descriptions 
+    as well as users’ preferences.
 -   Docker – used for building DLab Docker containers, which will be used for provisioning other components.
--   Jenkins – is an alternative to Web UI. It is accessible by the following link: http[s]://SSN\_Public\_IP\_or\_Public\_DNS/jenkins
 
-Elastic(Static) IP address is assigned to an SSN Node, so you are free to stop|start it and and SSN node's IP address won’t change.
+An Elastic (Static) IP address is assigned to the SSN node, so you are free to stop/start it and the SSN node's IP 
+address won’t change.
+
+## Endpoint
+
+This is a node which serves as a provisioning endpoint for DLab resources. The Endpoint machine is deployed separately 
+from the DLab installation and can even be deployed on a different cloud.
 
 ## Edge node
 
-Setting up Edge node is the first step that user is asked to do once logged into DLab. This node is used as proxy server and SSH gateway for the user. Through Edge node users can access Notebook via HTTP and SSH. Edge Node has a Squid HTTP web proxy pre-installed.
+This node is used as a reverse-proxy server for the user. Through the Edge node, users can access Notebooks via HTTPS. 
+The Edge node has an Nginx reverse proxy pre-installed.
 
 ## Notebook node
 
-The next step is setting up a Notebook node (or a Notebook server). It is a server with pre-installed applications and libraries for data processing, data cleaning and transformations, numerical simulations, statistical modeling, machine learning, etc. Following analytical tools are currently supported in DLab and can be installed on a Notebook node:
+The next step is setting up a Notebook node (or a Notebook server). It is a server with pre-installed applications and 
+libraries for data processing, data cleaning and transformations, numerical simulations, statistical modeling, machine 
+learning, etc. The following analytical tools are currently supported in DLab and can be installed on a Notebook node:
 
 -   Jupyter
+-   Jupyterlab
 -   RStudio
 -   Apache Zeppelin
 -   TensorFlow + Jupyter
+-   TensorFlow + RStudio
 -   Deep Learning + Jupyter
 
 Apache Spark is also installed for each of the analytical tools above.
 
-**Note:** terms 'Apache Zeppelin' and 'Apache Spark' hereinafter may be referred to as 'Zeppelin' and 'Spark' respectively or may have original reference.
+**Note:** terms 'Apache Zeppelin' and 'Apache Spark' hereinafter may be referred to as 'Zeppelin' and 'Spark' 
+respectively or may have original reference.
 
 ## Data engine cluster
 
 After deploying Notebook node, user can create one of the cluster for it:
 -   Data engine - Spark standalone cluster
 -   Data engine service - cloud managed cluster platform (EMR for AWS or Dataproc for GCP)
-That simplifies running big data frameworks, such as Apache Hadoop and Apache Spark to process and analyze vast amounts of data. Adding cluster is not mandatory and is only needed in case additional computational resources are required for job execution.
+That simplifies running big data frameworks, such as Apache Hadoop and Apache Spark, to process and analyze vast 
+amounts of data. Adding a cluster is not mandatory and is only needed if additional computational resources are 
+required for job execution.
 ----------------------
 # DLab Deployment <a name="DLab_Deployment"></a>
 
@@ -192,6 +228,7 @@
                  ├───dateengine-service
                  ├───edge
                  ├───notebook
+                 ├───project
                  └───ssn
 
 These directories contain the log files for each template and for DLab back-end services.
@@ -201,39 +238,98 @@
 -   selfservice.log – Self-Service log file;
 -   edge, notebook, dataengine, dataengine-service – contains logs of Python scripts.
 
+## Keycloak server <a name="Keycloak_server"></a>
+
+**Keycloak** is used to manage user authentication instead of the application. To use an existing server, the 
+following parameters must be specified either when running the *DLab* deployment script or in the 
+*/opt/dlab/conf/self-service.yml* and */opt/dlab/conf/provisioning.yml* files on the SSN node.
+
+| Parameter                | Description/Value             |
+|--------------------------|-------------------------------|
+| keycloak_realm_name      |Keycloak Realm name            |
+| keycloak_auth_server_url |Keycloak auth server URL       |
+| keycloak_client_name     |Keycloak client name           |
+| keycloak_client_secret   |Keycloak client secret         |
+| keycloak_user            |Keycloak user                  |
+| keycloak_user_password   |Keycloak user password         |
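+
+As a minimal sketch, assuming the parameter names above map one-to-one onto YAML keys (the exact keys and nesting in 
+*self-service.yml* and *provisioning.yml* may differ), the Keycloak settings could look like:
+
+```
+keycloak_realm_name: dlab
+keycloak_auth_server_url: https://<keycloak_host>/auth
+keycloak_client_name: dlab-ui
+keycloak_client_secret: <client_secret>
+keycloak_user: admin
+keycloak_user_password: <admin_password>
+```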
+
+### Preparing environment for Keycloak deployment <a name="Env_for_Keycloak"></a>
+
+Keycloak can be deployed with an Nginx proxy on an instance using the *deploy_keycloak.py* script. Currently it only 
+works over HTTP.
+
+Preparation steps for deployment:
+
+- Create a VM instance with the following settings:
+    - The instance should have access to Internet in order to install required prerequisites
+    - Boot disk OS Image - Ubuntu 18.04
+- Put the private key that is used to connect to the instance where Keycloak will be deployed somewhere on the 
+  instance where the deployment script will be executed.
+- Install Git and clone the DLab repository
+
+### Executing deployment script
+To build the Keycloak node, the following steps should be executed:
+- Connect to the instance via SSH and run the following commands:
+```
+sudo su
+apt-get update
+apt-get install -y python-pip
+pip install fabric==1.14.0
+```
+- Go to the *dlab* directory
+- Run *infrastructure-provisioning/scripts/deploy_keycloak/deploy_keycloak.py* deployment script:
+
+```
+/usr/bin/python infrastructure-provisioning/scripts/deploy_keycloak/deploy_keycloak.py --os_user ubuntu --keyfile ~/.ssh/key.pem --keycloak_realm_name test_realm_name  --keycloak_user admin --keycloak_user_password admin_password --public_ip_address XXX.XXX.XXX.XXX
+```
+
+List of parameters for Keycloak node deployment:
+
+| Parameter                 | Description/Value                                                                       |
+|---------------------------|-----------------------------------------------------------------------------------------|
+| os_user                   | Username used to connect to the instance |
+| keyfile                   | /path_to_key/private_key.pem, used to connect to the instance |
+| keycloak_realm_name       | Keycloak realm name that will be created |
+| keycloak_user             | Initial Keycloak admin username |
+| keycloak_user_password    | Password for the initial Keycloak admin user |
+| public_ip_address         | Public IP address of the instance (if not specified, Keycloak will be deployed on localhost). On AWS, try specifying the Public DNS (IPv4) instead of the IPv4 address if unable to connect. |
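+
+As a quick smoke test after deployment (assuming Keycloak’s default */auth* context path is exposed through the Nginx 
+proxy), the server can be checked with:
+
+```
+curl -I http://XXX.XXX.XXX.XXX/auth/
+```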
+
+
 ## Self-Service Node <a name="Self_Service_Node"></a>
 
 ### Preparing environment for DLab deployment <a name="Env_for_DLab"></a>
 
 Deployment of DLab starts from creating Self-Service(SSN) node. DLab can be deployed in AWS, Azure and Google cloud.
+
 For each cloud provider, prerequisites are different.
 
 <details><summary>In Amazon cloud <i>(click to expand)</i></summary>
 
-**Prerequisites:**
+Prerequisites:
 
-DLab can be deployed using one of the two options:
- - **First option: using IAM user.** DLab deployment script is executed on local machine and uses IAM user permissions to create resources in AWS.
- - **Second option (preferred) - using EC2 instance.** DLab deployment script is executed on EC2 instance prepared in advance and with attached IAM role. Deployment script uses the attached IAM role to create resources in AWS.
+DLab can be deployed using the following two methods:
+ - IAM user: DLab deployment script is executed on a local machine and uses IAM user permissions to create resources in AWS.
+ - EC2 instance: DLab deployment script is executed on an EC2 instance prepared in advance with an attached IAM role. 
+   Deployment script uses the attached IAM role to create resources in AWS.
 
-**Requirements for the first option:**  
+**'IAM user' method prerequisites:**  
  
- - IAM user with created AWS access key ID and secret access key. These keys are provided as arguments for the deployment script and are used to create resources in AWS.
+ - IAM user with created AWS access key ID and secret access key. These keys are provided as arguments for the 
+   deployment script and are used to create resources in AWS.
  - Amazon EC2 Key Pair. This key is system and is used for configuring DLab instances.
  - The following IAM [policy](#AWS_SSN_policy) should be attached to the IAM user in order to deploy DLab.
  
- **Requirements for the second option:**
+ **'EC2 instance' method prerequisites:**
  
  - Amazon EC2 Key Pair. This key is system and is used for configuring DLab instances.
  - EC2 instance where DLab deployment script is executed. 
  - IAM role with the following IAM [policy](#AWS_SSN_policy) should be attached to the EC2 instance. 
  
- **Requirements for both options(optional):**
+ **Optional prerequisites for both methods:**
   
-  - VPC ID. If VPC where DLab should be deployed is already in place, then "VPC ID" should be provided for deployment script. DLab instances are deployed in this VPC.
-  - Subnet ID. If Subnet where DLab should be deployed is already in place, then "Subnet ID" should be provided for deployment script. DLab SSN node and users' Edge nodes are deployed in this Subnet. 
+  - VPC ID. If the VPC where DLab should be deployed is already in place, then the "VPC ID" should be provided for the 
+    deployment script. DLab instances are deployed in this VPC.
+  - Subnet ID. If the Subnet where DLab should be deployed is already in place, then the "Subnet ID" should be 
+    provided for the deployment script. DLab SSN node and users' Edge nodes are deployed in this Subnet. 
  
- **DLab IAM Policy**
+ DLab IAM Policy
  <a name="AWS_SSN_policy"></a>
 ```
 {
@@ -333,19 +429,19 @@
 }
 ```
 
-**Preparation steps for preferred deployment option:**
+Preparation steps for deployment:
 
 - Create an EC2 instance with the following settings:
     - The instance should have access to Internet in order to install required prerequisites
     - The instance should have access to further DLab installation
     - AMI - Ubuntu 16.04
     - IAM role with [policy](#AWS_SSN_policy) should be assigned to the instance
-- Put SSH key file created through Amazon Console on the instance with the same name</details>
+- Put the SSH key file created through the Amazon Console on the instance, keeping the same name
+- Install Git and clone the DLab repository</details>
 
 <details><summary>In Azure cloud <i>(click to expand)</i></summary>
 
-
-**Prerequisites:**
+Prerequisites:
 
 - IAM user with Contributor permissions.
 - Service principal and JSON based auth file with clientId, clientSecret and tenantId.
@@ -354,7 +450,8 @@
 
 - Windows Azure Active Directory
 - Microsoft Graph
-- Windows Azure Service Management API
+- Windows Azure Service Management API</details>
+
 
 **Preparation steps for deployment:**
 
@@ -366,166 +463,85 @@
 
 <details><summary>In Google cloud (GCP) <i>(click to expand)</i></summary>
 
-**Prerequisites:**
+Prerequisites:
 
-- Service account and JSON auth file for it. In order to get JSON auth file, Key should be created for service account through Google cloud console.
+- IAM user
+- Service account and JSON auth file for it. In order to get the JSON auth file, a key should be created for the 
+  service account through the Google cloud console.
 - Google Cloud Storage JSON API should be enabled
 
-**Preparation steps for deployment:**
+Preparation steps for deployment:
 
-- Create a VM instance with the following settings:
+- Create a VM instance with the following settings:
     - The instance should have access to Internet in order to install required prerequisites
     - Boot disk OS Image - Ubuntu 16.04
 - Generate SSH key pair and rename private key with .pem extension
-- Put JSON auth file created through Google cloud console to users home directory</details>
+- Put the JSON auth file created through the Google cloud console into the user's home directory
+- Install Git and clone the DLab repository</details>
 
 ### Executing deployment script
 
-To build SSN node using instance in the specific cloud platform, following steps should be executed:
+To build the SSN node, the following steps should be executed:
 
-- Connect to created at previous step instance via SSH and run the following commands:
+- Connect to the instance via SSH and run the following commands:
 
 ```
 sudo su
 apt-get update
 apt-get install git
-git clone https://github.com/apache/incubator-dlab.git -b v2.1.1
+git clone https://github.com/apache/incubator-dlab.git
 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
 add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
 apt-get update
 apt-cache policy docker-ce
 apt-get install -y docker-ce=17.06.2~ce-0~ubuntu
 usermod -a -G docker *username*
-apt-get install python-pip
+apt-get install -y python-pip
 pip install fabric==1.14.0
 cd incubator-dlab
 ```
+- Go to the *dlab* directory
+- Run *infrastructure-provisioning/scripts/deploy_dlab.py* deployment script:
 
-- Run deployment script, which builds front-end and back-end part of DLab, creates SSN docker image and runs Docker container for creating SSN node:
+This Python script will build the front-end and back-end parts of DLab, create the SSN Docker image and run a Docker 
+container for creating the SSN node.
+
+<details><summary>In Amazon cloud <i>(click to expand)</i></summary>
 
 ```
-/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --action create
-	--conf_service_base_name <value>
-	--conf_os_family <value>
-	--conf_cloud_provider <value>
-	--key_path <value>
-	--conf_key_name <value>
-	--ldap_dn <value> --ldap_ou <value> --ldap_service_username <value> --ldap_service_password <value>
-	[--conf_tag_resource_id <value>]
-	[--workspace_path <value>]
-	[--conf_network_type <value>]
-	[--conf_vpc_cidr <value>]
-	[--conf_vpc2_cidr <value>]
-	[--conf_allowed_ip_cidr <value>]
-	[--conf_user_subnets_range <value>]
-	[--conf_additional_tags <value>]
-	[--ssn_hosted_zone_name <value>]
-	[--ssn_hosted_zone_id <value>]
-	[--ssn_subdomain <value>]
-	[--ssn_assume_role_arn <value>]
-	[--ssl_cert_path <value> --ssl_key_path <value>]
-	[--aws_access_key <value> --aws_secret_access_key <value>]
-	[--aws_region', type=str <value> | --azure_region <value> | --gcp_region <value>]
-	[--aws_zone <value> | --gcp_zone <value>]
-	[--aws_vpc_id <value> | --azure_vpc_name <value> | --gcp_vpc_name <value>]
-	[--conf_duo_vpc_enable <value>]
-	[--aws_vpc2_id <value>]
-	[--aws_user_predefined_s3_policies <value>]
-	[--aws_peering_id <value>]
-	[--aws_subnet_id <value> | --azure_subnet_name <value> | --gcp_subnet_name <value>]
-	[--aws_security_groups_ids <value> | --azure_security_group_name <value> | --gcp_firewall_name <value>]
-	[--aws_ssn_instance_size <value> | --azure_ssn_instance_size <value> | --gcp_ssn_instance_size <value>]
-	[--aws_account_id <value>]
-	[--aws_billing_bucket <value>]
-	[--aws_report_path <value>]
-	[--azure_resource_group_name <value>]
-	[--azure_auth_path <value>]
-	[--azure_datalake_enable <value>]
-	[--azure_ad_group_id <value>]
-	[--azure_offer_number <value>]
-	[--azure_currency <value>]
-	[--azure_locale <value>
-	[--azure_application_id <value>]
-	[--azure_validate_permission_scope <value>]
-	[--azure_oauth2_enabled <value>]
-	[--azure_region_info <value>]
-	[--azure_source_vpc_name <value>]
-	[--azure_source_resource_group_name <value>]
-	[--gcp_project_id --gcp_service_account_path <value>]
-	[--dlab_id <value>]
-	[--usage_date <value>]
-	[--product <value>]
-	[--usage_type <value>]
-	[--usage <value>]
-	[--cost <value>]
-	[--resource_id <value>]
-	[--tags <value>]
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --conf_service_base_name dlab-test --aws_access_key XXXXXXX --aws_secret_access_key XXXXXXXXXX --aws_region xx-xxxxx-x --conf_os_family debian --conf_cloud_provider aws --aws_vpc_id vpc-xxxxx --aws_subnet_id subnet-xxxxx --aws_security_groups_ids sg-xxxxx,sg-xxxx --key_path /path/to/key/ --conf_key_name key_name --conf_tag_resource_id dlab --aws_account_id xxxxxxxx --aws_billing_bucket billing_bucket --aws_report_path /billing/directory/ --action create
 ```
 
-<details><summary>List of common options and their descriptions: <i>(click to expand)</i></summary>
+List of parameters for SSN node deployment:
 
+| Parameter                 | Description/Value                                                                       |
+|---------------------------|-----------------------------------------------------------------------------------------|
+| conf\_service\_base\_name | Any infrastructure value (should be unique if multiple SSNs have been deployed before) |
+| aws\_access\_key          | AWS user access key                                                                     |
+| aws\_secret\_access\_key  | AWS user secret access key                                                              |
+| aws\_region               | AWS region                                                                              |
+| conf\_os\_family          | Name of the Linux distribution family, which is supported by DLab (Debian/RedHat)      |
+| conf\_cloud\_provider     | Name of the cloud provider, which is supported by DLab (AWS)                           |
+| conf\_duo\_vpc\_enable    | "true" - for installing DLab into two Virtual Private Clouds (VPCs) or "false" - for installing DLab into one VPC. Also, this parameter isn't required when deploying DLab in one VPC |
+| aws\_vpc\_id              | ID of the VPC (optional)                                                    |
+| aws\_subnet\_id           | ID of the public subnet (optional)                                                                  |
+| aws\_security\_groups\_ids| One or more ID\`s of AWS Security Groups, which will be assigned to SSN node (optional)             |
+| key\_path                 | Path to admin key (without key name)                                                    |
+| conf\_key\_name           | Name of the uploaded SSH key file (without “.pem” extension)                            |
+| conf\_tag\_resource\_id   | The name of tag for billing reports                                                     |
+| aws\_account\_id          | The ID of the Amazon account                                                            |
+| aws\_billing\_bucket      | The name of S3 bucket where billing reports will be placed                              |
+| aws\_report\_path         | The path to billing reports directory in S3 bucket. This parameter isn't required when billing reports are placed in the root of S3 bucket. |
+| action                    | In case of SSN node creation, this parameter should be set to “create”|
+| workspace\_path           | Path to DLab sources root                                                               |
+| conf\_image\_enabled      | Enable or disable creating an image the first time                                     |
 
-| Option                               | Description                                                               |
-| :---                                 |   :---                                                                    |
-| --conf_service_base_name (string)    | Any infrastructure value (should be unique if multiple SSN’s have been deployed before) |
-| --conf\_os\_family (string)          | Name of the Linux distributive family, which is supported by DLab         |
-| --conf\_cloud\_provider (string)     | Name of the cloud provider, which is supported by DLab (AWS, GCP or Azure)|
-| --key\_path (string)                 | Path to admin key (without key name)                                      |
-| --conf\_key\_name (string)           | Name of the uploaded SSH key file (without “.pem” extension)              |
-| --conf\_tag\_resource\_id (string)   | The name of tag for billing reports                                       |
-| --workspace\_path (string)           | Path to DLab sources root                                                 |
-| --conf_network_type (string)         | Define in which network DLab will be deployed. Possible options: public \| private |
-| --conf_vpc_cidr (string)             | CIDR of VPC                                                               |
-| --conf_vpc2_cidr (string)            | CIDR of secondary VPC                                                     |
-| --conf_allowed_ip_cidr (string)      | Comma-separated CIDR of IPs which will have access to SSN                 |
-| --conf_user_subnets_range (string)   | Range of subnets which will be using for users environments. For example: 10.10.0.0/24 - 10.10.10.0/24 |
-| --ssl_cert_path (string)             | Full path to SSL certificate                                              |
-| --ssl_key_path (string)              | Full path to key for SSL certificate                                      |
-| --ssn_hosted_zone_name (string)      | Name of hosted zone                                                       |
-| --ssn_hosted_zone_id (string)        | ID of hosted zone                                                         |
-| --ssn_subdomain (string)             | Subdomain name                                                            |
-| --ssn_assume_role_arn (string)       | Role ARN for creating Route53 record in different AWS account             |
-| --dlab_id (string)                   | Column name in report file that contains dlab id tag                      |
-| --usage_date (string)                | Column name in report file that contains usage date tag                   |
-| --product (string)                   | Column name in report file that contains product name tag                 |
-| --usage_type (string)                | Column name in report file that contains usage type tag                   |
-| --usage (string)                     | Column name in report file that contains usage tag                        |
-| --cost (string)                      | Column name in report file that contains cost tag                         |
-| --resource_id (string)               | Column name in report file that contains dlab resource id tag             |
-| --tags (string)                      | Column name in report file that contains dlab tags                        |
-| --ldap_dn (string)                   | Ldap distinguished name                                                   | 
-| --ldap_ou (string)                   | Ldap organisation unit                                                    |
-| --ldap_service_username (string)     | Ldap service user name                                                    |
-| --ldap_service_password (string)     | Ldap password for admin user                                              |
-</details>
-
-<details><summary>List of options and expample for AWS: <i>(click to expand)</i></summary>
-
-
-| Option                               | Description                                                               |
-| :---                                 |   :---                                                                    |
-| --aws_access_key (string)            | AWS user access key                                                       |
-| --aws_secret_access_key (string)     | AWS user secret access key                                                |
-| --aws_region (string)                | AWS region                                                                |
-| --aws_zone (string)                  | AWS zone                                                                  |
-| --aws\_vpc\_id (string)              | ID of the VPC                                                             |
-| --conf\_duo\_vpc\_enable (boolean)   | "true" - for installing DLab into two Virtual Private Clouds (VPCs) or "false" - for installing DLab into one VPC. Also this parameter isn't required when deploy DLab in one VPC |
-| --aws_vpc2_id (string)               | Secondary AWS VPC ID                                                      |
-| --aws\_subnet\_id (string)           | ID of the public subnet                                                   |
-| --aws\_security\_groups\_ids (list)  | One or more ID`s of AWS Security Groups, which will be assigned to SSN node |
-| --aws\_account\_id (string)          | The The ID of Amazon account                                              |   
-| --aws\_billing\_bucket (string)      | The name of S3 bucket where billing reports will be placed                |
-| --aws\_report\_path (string)         | The path to billing reports directory                                     |
-| --aws_peering_id (string)            | Amazon peering connection id                                              |
-| --aws_ssn_instance_size (string)     | The SSN instance shape                                                    |
-| --conf_additional_tags (list)        | Additional tags in format "Key1:Value1;Key2:Value2"                       |
-
-**Note:** If the following parameters are not specified, they creates automatically:
+**Note:** If the following parameters are not specified, they will be created automatically:
 -   aws\_vpc\_id
 -   aws\_subnet\_id
 -   aws\_sg\_ids
 
-**Note:** If billing won't be using, the following parameters are not required:
+**Note:** If billing won't be used, the following parameters are not required:
 -   aws\_account\_id
 -   aws\_billing\_bucket
 -   aws\_report\_path
@@ -537,79 +553,85 @@
 -   IAM role and EC2 Instance Profile for SSN
 -   Security Group for SSN node (if it was specified, script will attach the provided one)
 -   VPC, Subnet (if they have not been specified) for SSN and EDGE nodes
--   S3 bucket – its name will be \<service\_base\_name\>-ssn-bucket. This bucket will contain necessary dependencies and configuration files for Notebook nodes (such as .jar files, YARN configuration, etc.)
--   S3 bucket for for collaboration between Dlab users. Its name will be \<service\_base\_name\>-shared-bucket
+-   S3 bucket – its name will be \<service\_base\_name\>-ssn-bucket. This bucket will contain necessary dependencies and configuration files for Notebook nodes (such as .jar files, YARN configuration, etc.)
+-   S3 bucket for collaboration between DLab users. Its name will be \<service\_base\_name\>-\<endpoint\_name\>-shared-bucket</details>
 
-**Example for preferred deployment option:**
+<details><summary>In Azure cloud <i>(click to expand)</i></summary>
+
 ```
-/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --action create
-	--conf_service_base_name dlab-sample 
- 	--aws_region us-west-2 
-	--conf_os_family debian 
-	--conf_cloud_provider aws 
-	--aws_vpc_id vpc-xxxxxxxx 
-	--aws_subnet_id subnet-xxxxxxxx 
-	--aws_security_groups_ids sg-xxxxxxxx,sg-xxxxxxxx 
-	--key_path /home/ubuntu/key/ 
-	--conf_key_name dlab_key 
-	--conf_tag_resource_id user:tag 
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --conf_service_base_name dlab_test --azure_region westus2 --conf_os_family debian --conf_cloud_provider azure --azure_vpc_name vpc-test --azure_subnet_name subnet-test --azure_security_group_name sg-test1,sg-test2 --key_path /root/ --conf_key_name Test --azure_auth_path /dir/file.json  --action create
 ```
-</details>
 
-<details><summary>List of options and example for Azure: <i>(click to expand)</i></summary>
+List of parameters for SSN node deployment:
 
+| Parameter                         | Description/Value                                                                       |
+|-----------------------------------|-----------------------------------------------------------------------------------------|
+| conf\_service\_base\_name         | Any infrastructure value (should be unique if multiple SSNs have been deployed before) |
+| azure\_resource\_group\_name      | Resource group name (can be the same as the service base name)                        |
+| azure\_region                     | Azure region                                                                            |
+| conf\_os\_family                  | Name of the Linux distribution family, which is supported by DLab (Debian/RedHat)      |
+| conf\_cloud\_provider             | Name of the cloud provider, which is supported by DLab (Azure)                          |
+| azure\_vpc\_name                  | Name of the Virtual Network (VN) (optional)                                                         |
+| azure\_subnet\_name               | Name of the Azure subnet (optional)                                                                 |
+| azure\_security\_groups\_name     | One or more names of Azure Security Groups, which will be assigned to the SSN node (optional) |
+| azure\_ssn\_instance\_size        | Instance size of SSN instance in Azure                                                  |
+| key\_path                         | Path to admin key (without key name)                                                    |
+| conf\_key\_name                   | Name of the uploaded SSH key file (without “.pem” extension)                            |
+| azure\_auth\_path                 | Full path to auth json file                                                             |
+| azure\_offer\_number              | Azure offer id number                                                                   |
+| azure\_currency                   | Currency that is used for billing information (e.g. USD)                               |
+| azure\_locale                     | Locale that is used for billing information (e.g. en-US)                               |
+| azure\_region\_info               | Region info that is used for billing information (e.g. US)                             |
+| azure\_datalake\_enable           | Support of Azure Data Lake (true/false)                                                 |
+| azure\_oauth2\_enabled            | Defines if the Azure OAuth2 authentication mechanism is enabled (true/false)            |
+| azure\_validate\_permission\_scope| Defines if DLab verifies the user's permission to the configured resource (scope) during login with OAuth2 (true/false). If Data Lake is enabled, the default scope is the Data Lake Store Account; otherwise, the Resource Group where DLab is deployed is the default scope. If the user does not have any role in scope, he/she is forbidden to log in |
+| azure\_application\_id            | Azure application ID that is used to log in users in DLab                                                     |
+| azure\_ad\_group\_id              | ID of group in Active directory whose members have full access to shared folder in Azure Data Lake Store                                                                          |
+| action                            | In case of SSN node creation, this parameter should be set to “create”                  |
+| conf\_image\_enabled      | Enable or disable creating an image the first time |
 
-| Option                                       | Description                                                        |
-| ---                                          |    ---                                                             |                                            
-| --azure\_resource\_group\_name (string)      | Resource group name (could be the same as service base name)        |
-| --azure\_region (string)                     | Azure region                                                       |
-| --azure\_vpc\_name (string)                  | Name of the Virtual Network (VN)                                   |
-| --azure\_subnet\_name (string)               | Name of the Azure subnet                                           |
-| --azure\_security\_groups\_name (list)       | One or more Name\`s of Azure Security Groups, which will be assigned to SSN node |
-| --azure\_ssn\_instance\_size (string)        | Instance size of SSN instance in Azure                             |
-| --azure\_auth\_path (string)                 | Full path to auth json file                                        |
-| --azure\_offer\_number (string)              | Azure offer id number                                              |
-| --azure\_currency (string)                   | Currency that is used for billing information(e.g. USD)            |
-| --azure\_locale (string)                     | Locale that is used for billing information(e.g. en-US)            |
-| --azure\_region\_info (string)               | Region info that is used for billing information(e.g. US)          |
-| --azure\_datalake\_enable (boolean)          | Support of Azure Data Lake (true/false)                            |
-| --azure\_oauth2\_enabled (string)            | Defines if Azure OAuth2 authentication mechanisms is enabled(true/false) |
-| --azure\_validate\_permission\_scope (string)| Defines if DLab verifies user's permission to the configured resource(scope) during login with OAuth2 (true/false). If Data Lake is enabled default scope is Data Lake Store Account, else Resource Group, where DLab is deployed, is default scope. If user does not have any role in scope he/she is forbidden to log in |
-| --azure\_application\_id (string)            | Azure application ID that is used to log in users in DLab          |
-| --azure\_ad\_group\_id (string)              | ID of group in Active directory whose members have full access to shared folder in Azure Data Lake Store |
-| --azure_oauth2_enabled (boolean)             | Using OAuth2 for logging in DLab                                   |
-| --azure_source_vpc_name (string)             | Azure VPC source Name                                              |
-| --azure_source_resource_group_name (string)  | Azure source resource group                                        |
+**Note:** If the following parameters are not specified, they will be created automatically:
 
-**Note:** If the following parameters are not specified, they creates automatically:
 -   azure\_vpc\_name
 -   azure\_subnet\_name
 -   azure\_security\_groups\_name
 
 **Note:** Billing configuration:
 
-To know azure\_offer\_number open [Azure Portal](https://portal.azure.com), go to Subscriptions and open yours, then click Overview and you should see it under Offer ID property:
+To find azure\_offer\_number, open the [Azure Portal](https://portal.azure.com), go to Subscriptions and open yours, 
+then click Overview and you should see it under the Offer ID property:
 
 ![Azure offer number](doc/azure_offer_number.png)
 
-Please see [RateCard API](https://msdn.microsoft.com/en-us/library/mt219004.aspx) to get more details about azure\_offer\_number,
-azure\_currency, azure\_locale, azure\_region_info. These DLab deploy properties correspond to RateCard API request parameters.
+Please see [RateCard API](https://msdn.microsoft.com/en-us/library/mt219004.aspx) to get more details about 
+azure\_offer\_number, azure\_currency, azure\_locale, azure\_region_info. These DLab deploy properties correspond to 
+RateCard API request parameters.
 
-To have working billing functionality please review Billing configuration note and use proper parameters for SSN node deployment.
+To have working billing functionality, please review the Billing configuration note and use proper parameters for SSN 
+node deployment.
 
-To use Data Lake Store please review Azure Data Lake usage pre-requisites note and use proper parameters for SSN node deployment.
+To use Data Lake Store, please review the Azure Data Lake usage pre-requisites note and use proper parameters for SSN 
+node deployment.
 
 **Note:** Azure Data Lake usage pre-requisites:
 
 1. Configure application in Azure portal and grant proper permissions to it.
 - Open *Azure Active Directory* tab, then *App registrations* and click *New application registration*
-- Fill in ui form with the following parameters *Name* - put name of the new application, *Application type* - select Native, *Sign-on URL* put any valid url as it will be updated later
-- Grant proper permissions to the application. Select the application you just created on *App registration* view, then click *Required permissions*, then *Add->Select an API-> In search field type MicrosoftAzureQueryService* and press *Select*, then check the box *Have full access to the Azure Data Lake service* and save the changes. Repeat the same actions for *Windows Azure Active Directory* API (available on *Required permissions->Add->Select an API*) and the box *Sign in and read user profile*
+- Fill in the UI form with the following parameters: *Name* - the name of the new application, *Application type* - 
+  select Native, *Sign-on URL* - any valid URL (it will be updated later)
+- Grant proper permissions to the application. Select the application you just created on *App registration* view, then 
+  click *Required permissions*, then *Add->Select an API-> In search field type MicrosoftAzureQueryService* and press 
+  *Select*, then check the box *Have full access to the Azure Data Lake service* and save the changes. Repeat the same 
+  actions for *Windows Azure Active Directory* API (available on *Required permissions->Add->Select an API*) and the 
+  box *Sign in and read user profile*
 - Get *Application ID* from the application properties; it will be used as azure_application_id for the deploy_dlab.py script
-2. Usage of Data Lake resource predicts shared folder where all users can write or read any data. To manage access to this folder please create ot use existing group in Active Directory. All users from this group will have RW access to the shared folder. Put ID(in Active Directory) of the group as *azure_ad_group_id* parameter to deploy_dlab.py script
-3. After execution of deploy_dlab.py script go to the application created in step 1 and change *Redirect URIs* value to the https://SSN_HOSTNAME/ where SSN_HOSTNAME - SSN node hostname
+2. Usage of the Data Lake resource presumes a shared folder where all users can write or read any data. To manage access 
+   to this folder, please create or use an existing group in Active Directory. All users from this group will have RW 
+   access to the shared folder. Put the ID (in Active Directory) of the group as the *azure_ad_group_id* parameter of 
+   the deploy_dlab.py script
+3. After the deploy_dlab.py script has finished, go to the application created in step 1 and change the *Redirect URIs* 
+   value to https://SSN_HOSTNAME/, where SSN_HOSTNAME is the SSN node hostname
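+
+For illustration, an SSN deployment command that enables OAuth2 login and wires in the application and group created 
+above might look like the following (a hypothetical invocation; every value is a placeholder to be replaced with your own):
+
+```
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --conf_service_base_name dlab-test --azure_region westus2 --conf_os_family debian --conf_cloud_provider azure --key_path /path/to/key/ --conf_key_name key_name --azure_auth_path /dir/file.json --azure_oauth2_enabled true --azure_application_id <application_id_from_step_1> --azure_ad_group_id <group_id_from_step_2> --action create
+```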
 
-**SSN deployment creates following Azure resources:**
+After SSN node deployment the following Azure resources will be created:
 
 -   Resource group where all DLab resources will be provisioned
 -   SSN Virtual machine
@@ -619,74 +641,60 @@
 -   Virtual network and Subnet (if they have not been specified) for SSN and EDGE nodes
 -   Storage account and blob container for necessary further dependencies and configuration files for Notebook nodes (such as .jar files, YARN configuration, etc.)
 -   Storage account and blob container for collaboration between DLab users
--   If support of Data Lake is enabled: Data Lake and shared directory will be created
+-   If support of Data Lake is enabled: Data Lake and shared directory will be created</details>
 
-**Example:**
+<details><summary>In Google cloud (GCP) <i>(click to expand)</i></summary>
+
 ```
-/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --action create
-	--conf_service_base_name dlab-sample  
-	--azure_region westus2 
-	--conf_os_family debian 
-	--conf_cloud_provider azure 
-	--azure_vpc_name dlab-sample-vpc 
-	--azure_subnet_name dlab-sample-subnet 
-	--azure_security_group_name dlab-sample1-sg,dlab-sample2-sg 
-	--key_path /root/ 
-	--conf_key_name dlab_key 
-	--azure_auth_path /home/ubuntu/dlab.json
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --conf_service_base_name dlab-test --gcp_region xx-xxxxx --gcp_zone xxx-xxxxx-x --conf_os_family debian --conf_cloud_provider gcp --key_path /path/to/key/ --conf_key_name key_name --gcp_ssn_instance_size n1-standard-1 --gcp_project_id project_id --gcp_service_account_path /path/to/auth/file.json --action create
 ```
-</details>
 
-<details><summary>List of options and example for GCP: <i>(click to expand)</i></summary>
+List of parameters for SSN node deployment:
 
+| Parameter                    | Description/Value                                                                     |
+|------------------------------|---------------------------------------------------------------------------------------|
+| conf\_service\_base\_name    | Any infrastructure value (should be unique if multiple SSN’s have been deployed before)|
+| gcp\_region                  | GCP region                                                                            |
+| gcp\_zone                    | GCP zone                                                                              |
+| conf\_os\_family             | Name of the Linux distributive family, which is supported by DLab (Debian/RedHat)     |
+| conf\_cloud\_provider        | Name of the cloud provider, which is supported by DLab (GCP)                          |
+| gcp\_vpc\_name               | Name of the Virtual Network (VN) (optional)                                           |
+| gcp\_subnet\_name            | Name of the GCP subnet (optional)                                                     |
+| gcp\_firewall\_name          | One or more names of GCP Security Groups, which will be assigned to SSN node (optional) |
+| key\_path                    | Path to admin key (without key name)                                                  |
+| conf\_key\_name              | Name of the uploaded SSH key file (without “.pem” extension)                          |
+| gcp\_service\_account\_path  | Full path to auth json file                                                           |
+| gcp\_ssn\_instance\_size     | Instance size of SSN instance in GCP                                                  |
+| gcp\_project\_id             | ID of GCP project                                                                     |
+| action                       | In case of SSN node creation, this parameter should be set to “create”                |
+| conf\_image\_enabled         | Enable or disable creating an image at the first deployment                           |
+| billing\_dataset\_name       | Name of GCP dataset (BigQuery service)                                                |
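+
+For example, to enable billing reporting from the start, the BigQuery dataset name can be passed at deployment time 
+(a hypothetical invocation; all values are placeholders):
+
+```
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --conf_service_base_name dlab-test --gcp_region xx-xxxxx --gcp_zone xxx-xxxxx-x --conf_os_family debian --conf_cloud_provider gcp --key_path /path/to/key/ --conf_key_name key_name --gcp_ssn_instance_size n1-standard-1 --gcp_project_id project_id --gcp_service_account_path /path/to/auth/file.json --billing_dataset_name billing_dataset --action create
+```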
 
-| Option                                 | Description                                                              |
-| ---                                    |    ---                                                                   |                                            
-| --gcp\_region (string)                 | GCP region                                                               |
-| --gcp\_zone (string)                   | GCP zone                                                                 |
-| --gcp\_vpc\_name (string)              | Name of the Virtual Network (VN)                                         |
-| --gcp\_subnet\_name (string)           | Name of the GCP subnet                                                   |
-| --gcp\_firewall\_name (list)         | One or more Name\`s of GCP Security Groups, which will be assigned to SSN node |
-| --gcp\_service\_account\_path (string) | Full path to auth json file                                              |
-| --gcp\_ssn\_instance\_size (string)    | Instance size of SSN instance in GCP                                     |
-| --gcp\_project\_id (string)            | ID of GCP project                                                        |
+**Note:** If you are going to use a Dataproc cluster, be aware that Dataproc has limited availability in GCP regions: 
+[Cloud Dataproc availability by Region in GCP](https://cloud.google.com/about/locations/)
 
-**Note:** If you gonna use Dataproc cluster, be aware that Dataproc has limited availability in GCP regions. [Cloud Dataproc availability by Region in GCP](https://cloud.google.com/about/locations/)
-
-**SSN deployment creates following GCP resources:**
+After SSN node deployment the following GCP resources will be created:
 
 -   SSN VM instance
 -   External IP address for SSN instance
 -   IAM role and Service account for SSN
 -   Security Groups for SSN node (if it was specified, script will attach the provided one)
 -   VPC, Subnet (if they have not been specified) for SSN and EDGE nodes
--   Bucket – its name will be \<service\_base\_name\>-ssn-bucket. This bucket will contain necessary dependencies and configuration files for Notebook nodes (such as .jar files, YARN configuration, etc.)
--   Bucket for for collaboration between Dlab users. Its name will be \<service\_base\_name\>-shared-bucket
-
-**Example:**
-```
-/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --action create
-	--conf_service_base_name dlab-sample 
-	--gcp_region us-west1
-	--gcp_zone us-west1-a 
-	--conf_os_family debian 
-	--conf_cloud_provider gcp 
-	--key_path /root/ 
-	--conf_key_name dlab_key 
-	--gcp_ssn_instance_size n1-standard-1 
-	--gcp_project_id xxx-xxx-xxxx-xxxx-xxxxxx 
-	--gcp_service_account_path /home/ubuntu/gcp/service_account.json 
-```
-</details>
+-   Bucket for collaboration between DLab users. Its name will be 
+    \<service\_base\_name\>-\<endpoint\_name\>-shared-bucket</details>
 
 ### Terminating Self-Service Node
 
-Terminating SSN node also removes all nodes and components related to it. Basically, terminating Self-service node terminates all DLab’s infrastructure.
-
-**Example of command for terminating DLab environment:**
+Terminating the SSN node will also remove all nodes and components related to it. Basically, terminating the 
+Self-Service node will terminate all of DLab’s infrastructure.
+
+Example of command for terminating DLab environment:
 
 <details><summary>In Amazon <i>(click to expand)</i></summary>
 
+```
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --conf_service_base_name dlab-test --aws_access_key XXXXXXX --aws_secret_access_key XXXXXXXX --aws_region xx-xxxxx-x --key_path /path/to/key/ --conf_key_name key_name --conf_os_family debian --conf_cloud_provider aws --action terminate
+```
+List of parameters for SSN node termination:
 
 | Parameter                  | Description/Value                                                                  |
 |----------------------------|------------------------------------------------------------------------------------|
@@ -699,23 +707,14 @@
 | conf\_os\_family           | Name of the Linux distributive family, which is supported by DLab (Debian/RedHat)  |
 | conf\_cloud\_provider      | Name of the cloud provider, which is supported by DLab (AWS)                       |
 | action                     | terminate                                                                          |
-
-**Example:**
-```
-/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --action terminate
-	--aws_access_key=XXXXXXXXXXXXXXXXXXXX
-	--aws_secret_access_key=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-	--conf_service_base_name dlab-sample 
-	--aws_region us-west-2 
-	--key_path /home/ubuntu/key/ 
-	--conf_key_name dlab_key 
-	--conf_os_family debian 
-	--conf_cloud_provider aws 
-```
 </details>
 
 <details><summary>In Azure <i>(click to expand)</i></summary>
 
+```
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --conf_service_base_name dlab-test --azure_vpc_name vpc-test --azure_resource_group_name resource-group-test --azure_region westus2 --key_path /root/ --conf_key_name Test --conf_os_family debian --conf_cloud_provider azure --azure_auth_path /dir/file.json --action terminate
+```
+List of parameters for SSN node termination:
 
 | Parameter                  | Description/Value                                                                  |
 |----------------------------|------------------------------------------------------------------------------------|
@@ -728,60 +727,48 @@
 | conf\_key\_name            | Name of the uploaded SSH key file (without “.pem” extension)                       |
 | azure\_auth\_path          | Full path to auth json file                                                        |
 | action                     | terminate                                                                          |
-
-**Example:**
-```
-/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --action terminate
-	--conf_service_base_name dlab-sample 
-	--azure_vpc_name dlab-sample-vpc 
-	--azure_resource_group_name dlab-sample 
-	--azure_region westus2 
-	--key_path /root/ 
-	--conf_key_name dlab_key 
-	--conf_os_family debian 
-	--conf_cloud_provider azure 
-	--azure_auth_path /home/ubuntu/dlab.json
-```
 </details>
 
 <details><summary>In Google cloud <i>(click to expand)</i></summary>
 
-
-| Parameter                    | Description/Value                                                                       |
-|------------------------------|-----------------------------------------------------------------------------------------|
-| conf\_service\_base\_name    | Any infrastructure value (should be unique if multiple SSN’s have been deployed before) |
-| gcp\_region                  | GCP region                                                                              |
-| gcp\_zone                    | GCP zone                                                                                |
-| conf\_os\_family             | Name of the Linux distributive family, which is supported by DLab (Debian/RedHat)       |
-| conf\_cloud\_provider        | Name of the cloud provider, which is supported by DLab (GCP)                            |
-| key\_path                    | Path to admin key (without key name)                                                    |
-| conf\_key\_name              | Name of the uploaded SSH key file (without “.pem” extension)                            |
-| gcp\_service\_account\_path  | Full path to auth json file                                                             |
-| gcp\_project\_id             | ID of GCP project                                                                       |
-| action                       | In case of SSN node termination, this parameter should be set to “terminate”            |
-
-**Example:**
 ```
-/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --action terminate
-	--gcp_project_id xxx-xxx-xxxx-xxxx-xxxxxx 
-	--conf_service_base_name dlab-sample 
-	--gcp_region us-west1 
-	--gcp_zone us-west1-a 
-	--key_path /root/ 
-	--conf_key_name dlab_key 
-	--conf_os_family debian 
-	--conf_cloud_provider gcp 
-	--gcp_service_account_path /home/ubuntu/gcp/service_account.json 
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --gcp_project_id project_id --conf_service_base_name dlab-test --gcp_region xx-xxxxx --gcp_zone xx-xxxxx-x --key_path /path/to/key/ --conf_key_name key_name --conf_os_family debian --conf_cloud_provider gcp --gcp_service_account_path /path/to/auth/file.json --action terminate
 ```
+List of parameters for SSN node termination:
+
+| Parameter                    | Description/Value                                                                     |
+|------------------------------|---------------------------------------------------------------------------------------|
+| conf\_service\_base\_name    | Any infrastructure value (should be unique if multiple SSN’s have been deployed before)|
+| gcp\_region                  | GCP region                                                                            |
+| gcp\_zone                    | GCP zone                                                                              |
+| conf\_os\_family             | Name of the Linux distributive family, which is supported by DLab (Debian/RedHat)     |
+| conf\_cloud\_provider        | Name of the cloud provider, which is supported by DLab (GCP)                          |
+| gcp\_vpc\_name               | Name of the Virtual Network (VN) (optional)                                           |
+| gcp\_subnet\_name            | Name of the GCP subnet (optional)                                                     |
+| key\_path                    | Path to admin key (without key name)                                                  |
+| conf\_key\_name              | Name of the uploaded SSH key file (without “.pem” extension)                          |
+| gcp\_service\_account\_path  | Full path to auth json file                                                           |
+| gcp\_project\_id             | ID of GCP project                                                                     |
+| action                       | In case of SSN node termination, this parameter should be set to “terminate”          |
+
+**Note:** It is required to enter the gcp_vpc_name and gcp_subnet_name parameters if the Self-Service Node was deployed 
+in a pre-defined VPC and Subnet.
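+
+In that case, the termination command might look like the following (a hypothetical invocation; all values are placeholders):
+
+```
+/usr/bin/python infrastructure-provisioning/scripts/deploy_dlab.py --gcp_project_id project_id --conf_service_base_name dlab-test --gcp_region xx-xxxxx --gcp_zone xx-xxxxx-x --gcp_vpc_name vpc-test --gcp_subnet_name subnet-test --key_path /path/to/key/ --conf_key_name key_name --conf_os_family debian --conf_cloud_provider gcp --gcp_service_account_path /path/to/auth/file.json --action terminate
+```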
 </details>
 
 ## Edge Node <a name="Edge_Node"></a>
 
-Gateway node (or an Edge node) is an instance(virtual machine) provisioned in a public subnet. It serves as an entry point for accessing user’s personal analytical environment. It is created by an end-user, whose public key will be uploaded there. Only via Edge node, DLab user can access such application resources as notebook servers and dataengine clusters. Also, Edge Node is used to setup SOCKS proxy to access notebook servers via Web UI and SSH. Elastic(Static) IP address is assigned to an Edge Node. In case Edge node instance has been removed by mistake, there is an option to re-create it and Edge node IP address won’t change.
+A Gateway node (or an Edge node) is an instance (virtual machine) provisioned in a public subnet. It serves as an entry 
+point for accessing a user’s personal analytical environment. It is created by an end user, whose public key will be 
+uploaded there. Only via the Edge node can a DLab user access such application resources as notebook servers and 
+dataengine clusters. Also, the Edge node is used to set up a SOCKS proxy to access notebook servers via Web UI and SSH. 
+An Elastic (Static) IP address is assigned to the Edge node. 
 
 ### Create
 
-In order to create Edge node using DLab Web UI – login and, click on the button “Upload” (Depending on authorization provider that was chosen on deployment stage, user may be taken from [LDAP](#LDAP_Authentication) or from [Azure AD (Oauth2)](#Azure_OAuth2_Authentication)). Choose user’s SSH public key and after that click on the button “Create”. Edge node will be deployed and corresponding instance (virtual machine) will be started.
+In order to create an Edge node using the DLab Web UI, log in and click on the “Upload” button (depending on the 
+authorization provider chosen at the deployment stage, the user may be taken from [LDAP](#LDAP_Authentication) or from 
+[Azure AD (Oauth2)](#Azure_OAuth2_Authentication)). Choose the user’s SSH public key and after that click on the 
+“Create” button. The Edge node will be deployed and the corresponding instance (virtual machine) will be started.
 
 <details><summary>In Amazon <i>(click to expand)</i></summary>
 
@@ -825,7 +812,8 @@
 -   Security Group for all further user's Notebook instances
 -   Security Groups for all further user's master nodes of data engine cluster
 -   Security Groups for all further user's slave nodes of data engine cluster
--   User's private subnet. All further nodes (Notebooks, data engine clusters) will be provisioned in different subnet than SSN.
+-   User's private subnet. All further nodes (Notebooks, data engine clusters) will be provisioned in a different subnet 
+    than the SSN.
 -   User's storage account and blob container
 
 List of parameters for Edge node creation:
@@ -853,7 +841,8 @@
 -   Security Group for all further user's Notebook instances
 -   Security Groups for all further user's master nodes of data engine cluster
 -   Security Groups for all further user's slave nodes of data engine cluster
--   User's private subnet. All further nodes (Notebooks, data engine clusters) will be provisioned in different subnet than SSN.
+-   User's private subnet. All further nodes (Notebooks, data engine clusters) will be provisioned in a different subnet 
+    than the SSN.
 -   User's bucket
 
 List of parameters for Edge node creation:
@@ -875,7 +864,8 @@
 
 ### Start/Stop <a name=""></a>
 
-To start/stop Edge node, click on the button which looks like a cycle on the top right corner, then click on the button which is located in “Action” field and in the drop-down menu click on the appropriate action.
+To start/stop Edge node, click on the button which looks like a cycle on the top right corner, then click on the button 
+which is located in “Action” field and in the drop-down menu click on the appropriate action.
 
 <details><summary>In Amazon <i>(click to expand)</i></summary>
 
@@ -929,72 +919,19 @@
 | action                         | start/stop                                                                        |
 </details>
 
-### Recreate <a name=""></a>
-
-In case Edge node was damaged, or terminated manually, there is an option to re-create it.
-
-If Edge node was removed for some reason, to re-create it, click on the status button close to logged in users’s name (top right corner of the screen).Then click on gear icon in Actions column and choose “Recreate”.
-
-List of parameters for Edge node recreation:
-
-<details><summary>In Amazon <i>(click to expand)</i></summary>
-
-| Parameter                  | Description/Value                                                                 |
-|----------------------------|-----------------------------------------------------------------------------------|
-| conf\_resource             | edge                                                                              |
-| conf\_os\_family           | Name of the Linux distributive family, which is supported by DLAB (Debian/RedHat) |
-| conf\_service\_base\_name  | Unique infrastructure value, specified during SSN deployment                      |
-| conf\_key\_name            | Name of the uploaded SSH key file (without ".pem")                                |
-| edge\_user\_name           | Name of the user                                                                  |
-| aws\_vpc\_id               | ID of AWS VPC where infrastructure is being deployed                              |
-| aws\_region                | AWS region where infrastructure was deployed                                      |
-| aws\_security\_groups\_ids | ID of the SSN instance's AWS security group                                       |
-| aws\_subnet\_id            | ID of the AWS public subnet where Edge was deployed                               |
-| edge\_elastic\_ip          | AWS Elastic IP address which was associated to Edge node                          |
-| conf\_tag\_resource\_id    | The name of tag for billing reports                                               |
-| action                     | Create                                                                            |
-</details>
-
-<details><summary>In Azure <i>(click to expand)</i></summary>
-
-| Parameter                    | Description/Value                                                                 |
-|------------------------------|-----------------------------------------------------------------------------------|
-| conf\_resource               | edge                                                                              |
-| conf\_os\_family             | Name of the Linux distributive family, which is supported by DLAB (Debian/RedHat) |
-| conf\_service\_base\_name    | Unique infrastructure value, specified during SSN deployment                      |
-| conf\_key\_name              | Name of the uploaded SSH key file (without ".pem")                                |
-| edge\_user\_name             | Name of the user                                                                  |
-| azure\_vpc\_name             | NAme of Azure Virtual network where all infrastructure is being deployed          |
-| azure\_region                | Azure region where all infrastructure was deployed                                |
-| azure\_resource\_group\_name | Name of the resource group where all DLAb resources are being provisioned         |
-| azure\_subnet\_name          | Name of the Azure public subnet where Edge was deployed                           |
-| action                       | Create                                                                            |
-</details>
-
-<details><summary>In Google cloud <i>(click to expand)</i></summary>
-
-| Parameter                  | Description/Value                                                                     |
-|--------------------------------|-----------------------------------------------------------------------------------|
-| conf\_resource                 | edge                                                                              |
-| conf\_os\_family               | Name of the Linux distributive family, which is supported by DLAB (debian/redhat) |
-| conf\_service\_base\_name      | Unique infrastructure value, specified during SSN deployment                      |
-| conf\_key\_name                | Name of the uploaded SSH key file (without ".pem")                                |
-| edge\_user\_name               | Name of the user                                                                  |
-| gcp\_region                    | GCP region where infrastructure was deployed                                      |
-| gcp\_zone                      | GCP zone where infrastructure was deployed                                        |
-| gcp\_vpc\_name                 | Name of Azure Virtual network where all infrastructure is being deployed          |
-| gcp\_subnet\_name              | Name of the Azure public subnet where Edge will be deployed                       |
-| gcp\_project\_id               | ID of GCP project                                                                 |
-| action                         | create                                                                            |
-</details>
-
 ## Notebook node <a name="Notebook_node"></a>
 
-Notebook node is an instance (virtual machine), with preinstalled analytical software, needed dependencies and with pre-configured kernels and interpreters. It is the main part of personal analytical environment, which is setup by a data scientist. It can be Created, Stopped and Terminated. To support variety of analytical needs - Notebook node can be provisioned on any of cloud supported instance shape for your particular region. From analytical software, which is already pre-installed on a notebook node, end users can access (read/write) data stored on buckets/containers.
+A Notebook node is an instance (virtual machine) with preinstalled analytical software, needed dependencies, and 
+pre-configured kernels and interpreters. It is the main part of the personal analytical environment, which is set up by 
+a data scientist. It can be created, stopped, and terminated. To support a variety of analytical needs, a Notebook node 
+can be provisioned on any cloud-supported instance shape for your particular region. From the analytical software 
+already pre-installed on a notebook node, end users can access (read/write) data stored in buckets/containers.
 
 ### Create
 
-To create Notebook node, click on the “Create new” button. Then, in drop-down menu choose template type (jupyter/rstudio/zeppelin/tensor), enter notebook name and choose instance shape. After clicking the button “Create”, notebook node will be deployed and started.
+To create a Notebook node, click on the “Create new” button. Then, in the drop-down menu choose the template type 
+(jupyter/rstudio/zeppelin/tensor/etc.), enter a notebook name and choose an instance shape. After clicking the “Create” 
+button, the notebook node will be deployed and started.
 
 List of parameters for Notebook node creation:
 
@@ -1057,7 +994,8 @@
 
 ### Stop
 
-In order to stop Notebook node, click on the “gear” button in Actions column. From the drop-down menu click on “Stop” action.
+In order to stop the Notebook node, click on the “gear” button in the Actions column. From the drop-down menu click on 
+the “Stop” action.
 
 List of parameters for Notebook node stopping:
 
@@ -1104,7 +1042,8 @@
 
 ### Start
 
-In order to start Notebook node, click on the button, which looks like gear in “Action” field. Then in drop-down menu choose “Start” action.
+In order to start the Notebook node, click on the button which looks like a gear in the “Action” field. Then in the 
+drop-down menu choose the “Start” action.
 
 List of parameters for Notebook node start:
 
@@ -1157,7 +1096,8 @@
 
 ### Terminate
 
-In order to terminate Notebook node, click on the button, which looks like gear in “Action” field. Then in drop-down menu choose “Terminate” action.
+In order to terminate the Notebook node, click on the button which looks like a gear in the “Action” field. Then in the 
+drop-down menu choose the “Terminate” action.
 
 List of parameters for Notebook node termination:
 
@@ -1205,7 +1145,8 @@
 
 ### List/Install additional libraries
 
-In order to list available libraries (OS/Python2/Python3/R/Others) on Notebook node, click on the button, which looks like gear in “Action” field. Then in drop-down menu choose “Manage libraries” action.
+In order to list available libraries (OS/Python2/Python3/R/Others) on the Notebook node, click on the button which looks 
+like a gear in the “Action” field. Then in the drop-down menu choose the “Manage libraries” action.
 
 <details><summary>In Amazon <i>(click to expand)</i></summary>
 
@@ -1333,7 +1274,8 @@
 
 ### Manage git credentials
 
-In order to manage git credentials on Notebook node, click on the button “Git credentials”. Then in menu you can add or edit existing credentials.
+In order to manage git credentials on the Notebook node, click on the “Git credentials” button. Then in the menu you can 
+add or edit existing credentials.
 
 <details><summary>In Amazon <i>(click to expand)</i></summary>
 
@@ -1401,11 +1343,15 @@
 
 ## Dataengine-service cluster <a name="Dataengine-service_cluster"></a>
 
-Dataengine-service is a cluster provided by cloud as a service (EMR on AWS) can be created if more computational resources are needed for executing analytical algorithms and models, triggered from analytical tools. Jobs execution will be scaled to a cluster mode increasing the performance and decreasing execution time.
+Dataengine-service is a cluster provided by the cloud as a service (EMR on AWS). It can be created if more computational 
+resources are needed for executing analytical algorithms and models triggered from analytical tools. Job execution will 
+be scaled to cluster mode, increasing performance and decreasing execution time.
 
 ### Create
 
-To create dataengine-service cluster click on the “gear” button in Actions column, and click on “Add computational resources”. Specify dataengine-service version, fill in dataengine-service name, specify number of instances and instance shapes. Click on the “Create” button.
+To create a dataengine-service cluster, click on the “gear” button in the Actions column and click on “Add computational 
+resources”. Specify the dataengine-service version, fill in a dataengine-service name, and specify the number of 
+instances and instance shapes. Click on the “Create” button.
 
 List of parameters for dataengine-service cluster creation:
 
@@ -1491,7 +1437,8 @@
 
 ### List/Install additional libraries
 
-In order to list available libraries (OS/Python2/Python3/R/Others) on Dataengine-service, click on the button, which looks like gear in “Action” field. Then in drop-down menu choose “Manage libraries” action.
+In order to list available libraries (OS/Python2/Python3/R/Others) on the Dataengine-service, click on the button which 
+looks like a gear in the “Action” field. Then in the drop-down menu choose the “Manage libraries” action.
 
 <details><summary>In Amazon <i>(click to expand)</i></summary>
 
@@ -1584,11 +1531,14 @@
 
 ## Dataengine cluster <a name="Dataengine_cluster"></a>
 
-Dataengine is cluster based on Standalone Spark framework can be created if more computational resources are needed for executing analytical algorithms, but without additional expenses for cloud provided service.
+Dataengine is a cluster based on the standalone Spark framework. It can be created if more computational resources are 
+needed for executing analytical algorithms, but without additional expenses for a cloud-provided service.
 
 ### Create
 
-To create Spark standalone cluster click on the “gear” button in Actions column, and click on “Add computational resources”. Specify dataengine version, fill in dataengine name, specify number of instances and instance shapes. Click on the “Create” button.
+To create a Spark standalone cluster, click on the “gear” button in the Actions column and click on “Add computational 
+resources”. Specify the dataengine version, fill in a dataengine name, and specify the number of instances and instance 
+shapes. Click on the “Create” button.
 
 List of parameters for dataengine cluster creation:
 
@@ -1703,7 +1653,8 @@
 
 ### List/Install additional libraries
 
-In order to list available libraries (OS/Python2/Python3/R/Others) on Dataengine, click on the button, which looks like gear in “Action” field. Then in drop-down menu choose “Manage libraries” action.
+In order to list available libraries (OS/Python2/Python3/R/Others) on the Dataengine, click on the button which looks 
+like a gear in the “Action” field. Then in the drop-down menu choose the “Manage libraries” action.
 
 <details><summary>In Amazon <i>(click to expand)</i></summary>
 
@@ -1849,12 +1800,15 @@
 
 ## DLab Web UI <a name="DLab Web UI"></a>
 
-DLab self service is listening to the secure 8443 port. This port is used for secure local communication with provisioning service.
+The DLab Self-Service listens on the secure port 8443. This port is used for secure local communication with the 
+provisioning service.
 
 There is also Nginx proxy server running on Self-Service node, which proxies remote connection to local 8443 port.
-Nginx server is listening to both 80 and 443 ports by default. It means that you could access self-service Web UI using non-secure connections (80 port) or secure (443 port).
+The Nginx server listens on both ports 80 and 443 by default. It means that you can access the Self-Service Web UI using 
+a non-secure connection (port 80) or a secure one (port 443).
 
-Establishing connection using 443 port you should take into account that DLab uses self-signed certificate from the box, however you are free to switch Nginx to use your own domain-verified certificate.
+When establishing a connection on port 443, you should take into account that DLab uses a self-signed certificate out of 
+the box; however, you are free to switch Nginx to your own domain-verified certificate.
 
 To disable non-secure connection please do the following:
 -   uncomment at /etc/nginx/conf.d/nginx_proxy.conf file rule that rewrites all requests from 80 to 443 port;
@@ -1875,14 +1829,18 @@
 -   separate system process;
 -   manual loading or use external scheduler;
 
-The billing  module is running as part of the Self-Service (if billing was switched ON before SSN deployment). For details please refer to section [Self-Service Node](#Self_Service_Node). Otherwise, you should manually configure file billing.yml. See the descriptions how to do this in the configuration file. Please also note, that you should also add an entry in the Mongo database into collection:
+The billing module runs as part of the Self-Service (if billing was switched ON before SSN deployment). For 
+details please refer to section [Self-Service Node](#Self_Service_Node). Otherwise, you should manually configure the 
+billing.yml file. See the descriptions of how to do this in the configuration file. Please also note that you should add 
+an entry into the Mongo database collection:
 ```
 {
     "_id": "conf_tag_resource_id",
     "Value": "<CONF_TAG_RESOURCE_ID>"
 }
 ```
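+
+For example, such an entry could be added from the Mongo shell as follows (a sketch; the collection name *settings* is 
+an assumption, so check billing.yml for the actual collection to use):
+
+```
+# NB: the collection name "settings" below is an assumption; verify it against billing.yml
+mongo <database_name> -u <username> -p <password> --eval 'db.settings.insert({"_id": "conf_tag_resource_id", "Value": "<CONF_TAG_RESOURCE_ID>"})'
+```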
-After you have configured the billing, you can run it as a process of Self-Service. To do this, in the configuration file self-service.yml set the property **BillingSchedulerEnabled** to **true** and restart the Self-Service:
+After you have configured the billing, you can run it as a process of Self-Service. To do this, in the configuration 
+file self-service.yml set the property **BillingSchedulerEnabled** to **true** and restart the Self-Service:
 ```
 sudo supervisorctl stop ui
 sudo supervisorctl start ui
@@ -2005,6 +1963,7 @@
 docker build --build-arg OS=<os_family> --file general/files/<cloud_provider>/base_Dockerfile -t docker.dlab-base .
 docker build --build-arg OS=<os_family> --file general/files/<cloud_provider>/edge_Dockerfile -t docker.dlab-edge .
 docker build --build-arg OS=<os_family> --file general/files/<cloud_provider>/jupyter_Dockerfile -t docker.dlab-jupyter .
+docker build --build-arg OS=<os_family> --file general/files/<cloud_provider>/jupyterlab_Dockerfile -t docker.dlab-jupyterlab .
 docker build --build-arg OS=<os_family> --file general/files/<cloud_provider>/rstudio_Dockerfile -t docker.dlab-rstudio .
 docker build --build-arg OS=<os_family> --file general/files/<cloud_provider>/zeppelin_Dockerfile -t docker.dlab-zeppelin .
 docker build --build-arg OS=<os_family> --file general/files/<cloud_provider>/tensor_Dockerfile -t docker.dlab-tensor .
@@ -2016,7 +1975,8 @@
 ----------------
 # Development <a name="Development"></a>
 
-DLab services could be ran in development mode. This mode emulates real work an does not create any resources on cloud provider environment.
+DLab services can be run in development mode. This mode emulates real work and does not create any resources in the 
+cloud provider environment.
 
 ## Folder structure <a name="Folder_structure"></a>
 
@@ -2041,7 +2001,8 @@
 
 ## Pre-requisites <a name="Pre-requisites"></a>
 
-In order to start development of Front-end Web UI part of DLab - Git repository should be cloned and the following packages should be installed:
+In order to start development of the Front-end Web UI part of DLab, the Git repository should be cloned and the 
+following packages should be installed:
 
 -   Git 1.7 or higher
 -   Python 2.7 with library Fabric v1.14.0
@@ -2062,7 +2023,8 @@
 
 #### Self-Service
 
-Self-Service provides REST based API’s. It tightly interacts with Provisioning Service and Security Service and actually delegates most of user\`s requests for execution.
+Self-Service provides REST-based APIs. It tightly interacts with the Provisioning Service and Security Service and 
+actually delegates most of the user\`s requests for execution.
 
 | API class name                  | Supported actions                                     | Description            |
 |---------------------------------|-------------------------------------------------------|------------------------|
@@ -2078,12 +2040,14 @@
 | SecurityResource                | Login<br>Authorize<br>Logout                          | User’s authentication. |
 | UserSettingsResource            | Get settings<br>Save settings                         | User’s preferences.    |
 
-Some class names may have endings like Aws or Azure(e.g. ComputationalResourceAws, ComputationalResourceAzure, etc...). It means that it's cloud specific class with a proper API
+Some class names may have endings like Aws or Azure (e.g. ComputationalResourceAws, ComputationalResourceAzure, etc.). 
+This means it is a cloud-specific class with a proper API.
 
 
 #### Provisioning Service
 
-The Provisioning Service is key, REST based service for management of cloud specific or Docker based environment resources like computational, exploratory,
+The Provisioning Service is a key, REST-based service for management of cloud-specific or Docker-based environment 
+resources like computational, exploratory,
 edge, etc.
 
 | API class name            | Supported actions                       | Description                                                                  |
@@ -2096,18 +2060,25 @@
 | InfrastructureResource    | Status                                  | Docker action for obtaining status of DLab infrastructure instances.         |
 | LibExploratoryResource    | Lib list<br>Install lib                 | Docker actions to install libraries on notebooks                             |
 
-Some class names may have endings like Aws or Azure(e.g. ComputationalResourceAws, ComputationalResourceAzure, etc...). It means that it's cloud specific class with a proper API
+Some class names may have endings like Aws or Azure (e.g. ComputationalResourceAws, ComputationalResourceAzure, etc.). 
+This means it is a cloud-specific class with a proper API.
 
 #### Security service
 
-Security service is REST based service for user authentication against LDAP/LDAP + AWS/Azure OAuth2 depending on module configuration and cloud provider.
+The Security service is a REST-based service for user authentication against LDAP / LDAP + AWS / Azure OAuth2, depending 
+on module configuration and cloud provider.
 LDAP alone provides an authentication endpoint that allows verifying the authenticity of users against an LDAP instance.
-If you use AWS cloud provider LDAP + AWS authentication could be useful as it allows to combine LDAP authentication and verification if user has any role in AWS account
+If you use the AWS cloud provider, LDAP + AWS authentication can be useful as it combines LDAP authentication with 
+verification that the user has some role in the AWS account.
 
-DLab provides OAuth2(client credentials and authorization code flow) security authorization mechanism for Azure users. This kind of authentication is required when you are going to use Data Lake. If Data Lake is not enabled you have two options LDAP or OAuth2
+DLab provides an OAuth2 (client credentials and authorization code flow) security authorization mechanism for Azure 
+users. This kind of authentication is required when you are going to use Data Lake. If Data Lake is not enabled, you 
+have two options: LDAP or OAuth2.
 If OAuth2 is in use, the security-service validates the user's permissions against the configured permission scope (a resource in Azure).
-If Data Lake is enabled default permission scope(can be configured manually after deploy DLab) is Data Lake Store account so only if user has any role in scope of Data Lake Store Account resource he/she will be allowed to log in
-If Data Lake is disabled but Azure OAuth2 is in use default permission scope will be Resource Group where DLab is created and only users who have any roles in the resource group will be allowed to log in.
+If Data Lake is enabled, the default permission scope (it can be configured manually after DLab is deployed) is the 
+Data Lake Store account, so a user will be allowed to log in only if he/she has some role on the Data Lake Store Account 
+resource. If Data Lake is disabled but Azure OAuth2 is in use, the default permission scope will be the Resource Group 
+where DLab is created, and only users who have some role in that resource group will be allowed to log in.
 
 
 ## Front-end <a name="Front_end"></a>
@@ -2129,7 +2100,9 @@
 
 ## How to setup local development environment <a name="setup_local_environment"></a>
 
-The development environment setup description is written with assumption that user already has installed Java8 (JDK), Maven3 and set environment variables (JAVA\_HOME, M2\_HOME).­­­­­­ The description will cover Mongo installation, Mongo user creation, filling initial data into Mongo, Node.js installation
+The development environment setup description is written with the assumption that the user has already installed Java 8 
+(JDK) and Maven 3 and has set the environment variables (JAVA\_HOME, M2\_HOME). The description covers Mongo 
+installation, Mongo user creation, filling initial data into Mongo, and Node.js installation.
 
 ### Install Mongo database
 
@@ -2171,7 +2144,8 @@
 
 ### Setting up environment options
 
-  * Set option CLOUD_TYPE to aws/azure, DEV\_MODE to **true**, mongo database name and password in configuration file dlab/infrastructure-provisioning/src/ssn/templates/ssn.yml
+  * Set the option CLOUD_TYPE to aws/azure, DEV\_MODE to **true**, and the mongo database name and password in the 
+  configuration file dlab/infrastructure-provisioning/src/ssn/templates/ssn.yml
 
 ```
 <#assign CLOUD_TYPE="aws">
@@ -2246,7 +2220,8 @@
 
   * Sign the certificate.
 
-  * Import the certificate into a truststore: a repository of certificates used for verifying the certificates. A truststore typically contains more than one certificate.
+  * Import the certificate into a truststore: a repository of certificates used for verifying the certificates. A 
+  truststore typically contains more than one certificate.
 
 Please find below the set of commands to create a certificate, depending on the OS.
 
@@ -2276,14 +2251,20 @@
 
 ## How to run locally <a name="run_locally"></a>
 
-There is a possibility to run Self-Service and Provisioning Service locally. All requests from Provisioning Service to Docker are mocked and instance creation status will be persisted to Mongo (only without real impact on Docker and AWS). Security Service can\`t be running on local machine because of local LDAP mocking complexity.
+It is possible to run the Self-Service and Provisioning Service locally. All requests from the Provisioning Service to 
+Docker are mocked and the instance creation status will be persisted to Mongo (without real impact on Docker and AWS). 
+The Security Service cannot be run on a local machine because of the complexity of mocking LDAP locally.
 
-Both services, Self-Service and Provisioning Service are dependent on dlab/provisioning-infrastructure/ssn/templates/ssn.yml configuration file. Both services have main functions as entry point, SelfServiceApplication for Self-Service and ProvisioningServiceApplication for Provisioning Service. Services could be started by running main methods of these classes. Both main functions require two arguments:
+Both services, Self-Service and Provisioning Service, depend on the dlab/provisioning-infrastructure/ssn/templates/ssn.yml 
+configuration file. Both services have main functions as entry points: SelfServiceApplication for Self-Service and 
+ProvisioningServiceApplication for Provisioning Service. The services can be started by running the main methods of 
+these classes. Both main functions require two arguments:
 
   * Run mode (“server”)
-  * Configuration file name (“self-service.yml” or “provisioning.yml”  depending on the service). Both files are located in root service directory. These configuration files contain service settings and are ready to use.
+  * Configuration file name (“self-service.yml” or “provisioning.yml”, depending on the service). Both files are located 
+  in the root service directory. These configuration files contain service settings and are ready to use.
 
-The services start up order does matter. Since Self-Service depends on Provisioning Service, the last should be started first and Self-Service afterwards. Services could be started from local IDEA (Eclipse or Intellij Idea) “Run” functionality of toolbox.
+The services' start-up order matters. Since Self-Service depends on Provisioning Service, the latter should be started 
+first and Self-Service afterwards. The services can be started from the IDE's (Eclipse or IntelliJ IDEA) “Run” 
+functionality.
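+
+For instance, the two Run configurations might be set up as follows (a sketch; only the class and file names come from 
+this document, the layout of the configurations is illustrative):
+
+```
+# Run configuration 1 - start first (Provisioning Service):
+#   Main class:        ProvisioningServiceApplication
+#   Program arguments: server provisioning.yml
+
+# Run configuration 2 - start second (Self-Service):
+#   Main class:        SelfServiceApplication
+#   Program arguments: server self-service.yml
+```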
 
 The application run flow is as follows:
 
@@ -2314,9 +2295,13 @@
             ├───edge
             ├───general
             ├───jupyter
+            ├───jupyterlab
+            ├───project
             ├───rstudio
             ├───ssn
+            ├───superset
             ├───tensor
+            ├───tensor-rstudio
             └───zeppelin
 
 Each directory except *general* contains Python scripts, Docker files, templates, files for appropriate Docker image.
@@ -2407,7 +2392,8 @@
 docker run -i -v /root/KEYNAME.pem:/root/keys/KEYNAME.pem –v /web_app:/root/web_app -e "conf_os_family=debian" -e "conf_cloud_provider=aws" -e "conf_resource=ssn" -e "aws_ssn_instance_size=t2.medium" -e "aws_region=us-west-2" -e "aws_vpc_id=vpc-111111" -e "aws_subnet_id=subnet-111111" -e "aws_security_groups_ids=sg-11111,sg-22222,sg-33333" -e "conf_key_name=KEYNAME" -e "conf_service_base_name=dlab_test" -e "aws_access_key=Access_Key_ID" -e "aws_secret_access_key=Secret_Access_Key" -e "conf_tag_resource_id=dlab" docker.dlab-ssn --action create ;
 ```
 
--   Docker executes *entrypoint.py* script with action *create*. *Entrypoint.py* will set environment variables, which were provided from Docker and execute *general/api/create.py* script:
+-   Docker executes the *entrypoint.py* script with action *create*. *Entrypoint.py* will set the environment variables 
+    provided from Docker and execute the *general/api/create.py* script:
 ```
     elif args.action == 'create':
         with hide('running'):
@@ -2418,7 +2404,8 @@
   try:
         local('cd /root; fab run')
 ```
--   Function *run()* in file *ssn/fabfile.py* will be executed. It will run two scripts *general/scripts/aws/ssn\_prepare.py* and *general/scripts/aws/ssn\_configure.py*:
+-   Function *run()* in file *ssn/fabfile.py* will be executed. It will run two scripts *general/scripts/aws/ssn\_prepare.py*
+    and *general/scripts/aws/ssn\_configure.py*:
 ```
     try:
         local("~/scripts/{}.py".format('ssn_prepare'))
@@ -2434,7 +2421,8 @@
         append_result("Failed configuring SSN node. Exception: " + str(err))
         sys.exit(1)
 ```
--   The scripts *general/scripts/<cloud_provider>/ssn\_prepare.py* an *general/scripts/<cloud_provider>/ssn\_configure.py* will execute other Python scripts/functions for:
+-   The scripts *general/scripts/<cloud_provider>/ssn\_prepare.py* and *general/scripts/<cloud_provider>/ssn\_configure.py* 
+    will execute other Python scripts/functions for:
  1. *ssn\_prepare.py:*
     1. Creating configuration file (for AWS)
     2. Creating Cloud resources.
@@ -2505,9 +2493,12 @@
         sys.exit(1)
 ```
 
-This function describes process of creating Jupyter node. It is divided into two parts – prepare and configure. Prepare part is common for all notebook templates and responsible for creating of necessary cloud resources, such as EC2 instances, etc. Configure part describes how the appropriate services will be installed.
+This function describes the process of creating a Jupyter node. It is divided into two parts – prepare and configure. 
+The prepare part is common for all notebook templates and is responsible for creating the necessary cloud resources, 
+such as EC2 instances. The configure part describes how the appropriate services will be installed.
 
-To configure Jupyter node, the script *jupyter\_configure.py* is executed. This script describes steps for configuring Jupyter node. In each step, the appropriate Python script is executed.
+To configure the Jupyter node, the script *jupyter\_configure.py* is executed. This script describes the steps for 
+configuring the Jupyter node. In each step, the appropriate Python script is executed.
 
 For example:
 
@@ -2548,7 +2539,8 @@
     ensure_jre_jdk(args.os_user)
 ```
 
-This script call functions for configuring Jupyter node. If this function is OS dependent, it will be placed in *infrastructure-provisioning/src/general/lib/\<OS\_family\>/debian/notebook\_lib.py*
+This script calls functions for configuring the Jupyter node. If a function is OS dependent, it is placed in 
+*infrastructure-provisioning/src/general/lib/\<OS\_family\>/debian/notebook\_lib.py*
 
 All functions in template directory (e.g. *infrastructure-provisioning/src/my-tool/*) should be OS and cloud independent.
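+
+For example, an OS-dependent helper for a hypothetical *my-tool* template could be added to the Debian *notebook\_lib.py* 
+along these lines (a minimal sketch in the Fabric 1.x style used by the provisioning scripts; the package and 
+marker-file names are illustrative):
+
+```
+from fabric.api import sudo
+from fabric.contrib.files import exists
+
+
+def ensure_my_tool(os_user):
+    # Install my-tool once per node; the marker file prevents re-installation.
+    if not exists('/home/{}/.ensure_dir/my_tool_ensured'.format(os_user)):
+        sudo('apt-get -y install my-tool')
+        sudo('touch /home/{}/.ensure_dir/my_tool_ensured'.format(os_user))
+```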
 
@@ -2564,8 +2556,10 @@
 -   scripts directory – contains all required configuration scripts.
 
 
--   *infrastructure-provisioning/src/general/files/<cloud_provider>/my-tool_Dockerfile* – used for building template Docker image and describes which files, scripts, templates are required and will be copied to template Docker image.
--   *infrastructure-provisioning/src/general/files/<cloud_provider>/my-tool_descriptsion.json* – JSON file for DLab Web UI. In this file you can specify:
+-   *infrastructure-provisioning/src/general/files/<cloud_provider>/my-tool_Dockerfile* – used for building template 
+    Docker image and describes which files, scripts, templates are required and will be copied to template Docker image.
+-   *infrastructure-provisioning/src/general/files/<cloud_provider>/my-tool_description.json* – JSON file for DLab Web 
+    UI. In this file you can specify (see the sketch after this list):
   * exploratory\_environment\_shapes – list of EC2 shapes
   * exploratory\_environment\_versions – description of template
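+
+A stripped-down description file might be shaped like this (a sketch only; the two top-level keys come from the list 
+above, while the nested fields and all values are illustrative and should be checked against the existing 
+*\*\_description.json* files):
+
+```
+{
+  "exploratory_environment_shapes": {
+    "For testing": [
+      {"Size": "S", "Description": "t2.medium", "Type": "t2.medium", "Ram": "4 GB", "Cpu": "2"}
+    ]
+  },
+  "exploratory_environment_versions": [
+    {
+      "template_name": "My-tool 1.0",
+      "description": "Base image with my-tool node creation routines",
+      "version": "my-tool-1.0",
+      "vendor": "AWS"
+    }
+  ]
+}
+```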
 
@@ -2607,8 +2601,8 @@
 
 -   files – directory for files used by newly added templates only;
 
-All Docker images are being built while creating SSN node. To add newly created template, add it to the list of images in the
-following script:
+All Docker images are built while creating the SSN node. To add a newly created template, add it to the list of images 
+in the following script:
 
 Path: *infrastructure-provisioning/src/general/scripts/aws/ssn\_configure.py*
 ```
@@ -2636,8 +2630,10 @@
 
 ### Unified logging and group management
 
-There are a few popular LDAP distributions on the market like Active Directory, Open LDap. That’s why some differences in configuration appear.
-Also depending on customization, there might be differences in attributes configuration. For example the DN(distinguished name) may contain different attributes:
+There are a few popular LDAP distributions on the market, like Active Directory and OpenLDAP. That’s why some 
+differences in configuration appear.
+Also, depending on customization, there might be differences in attribute configuration. For example, the 
+DN (distinguished name) may contain different attributes:
 
 - **DN=CN=Name Surname,OU=groups,OU=EPAM,DC=Company,DC=Cloud**
 - **DN=UID=UID#53,OU=groups,OU=Company,DC=Company,DC=Cloud**
@@ -2646,13 +2642,15 @@
 
 The relation between users and groups also varies from vendor to vendor.
 
-For example, in Open LDAP the group object may contain set (from 0 to many) attributes **"memberuid"** with values equal to user`s attribute **“uid”**.
+For example, in OpenLDAP the group object may contain a set (from 0 to many) of **"memberuid"** attributes with values 
+equal to the user`s **“uid”** attribute.
 
 However, in Active Directory the mappings are done based on other attributes.
 On the group side there is an attribute **"member"** (from 0 to many values) whose value is the user`s **DN** (distinguished name).
 
 
-To fit the unified way of LDAP usage, we introduced configuration file with set of properties and customized scripts (python and JavaScript based).
+To fit the unified way of LDAP usage, we introduced a configuration file with a set of properties and customized scripts 
+(Python and JavaScript based).
 On backend side, all valuable attributes are further collected and passed to these scripts.
 To apply some customization it is required to update a few properties in **security.yml** and customize the scripts.
 
@@ -2665,7 +2663,8 @@
 - **ldapSearchAttribute: uid**
 
 Where the:
-- **ldapBindTemplate** is a user`s DN template which should be filed with custom value. Here the template could be changed: uid=%s,ou=People,dc=example,dc=com -> cn=%s,ou=People,dc=example,dc=com.
+- **ldapBindTemplate** is a user`s DN template which should be filled with a custom value. Here the template could be 
+  changed: uid=%s,ou=People,dc=example,dc=com -> cn=%s,ou=People,dc=example,dc=com.
 - **ldapBindAttribute** - this is a major attribute, on which the DN is based. Usually it is one of: uid, cn, or email.
 - **ldapSearchAttribute** - another attribute, based on which users will be looked up in LDAP.
 
@@ -2684,7 +2683,8 @@
 
 ### Script structure
 
-The scripts above were created to flexibly manage user`s security configuration. They all are part of **security.yml** configuration. All scripts have following structure:
+The scripts above were created to flexibly manage the user`s security configuration. They are all part of the 
+**security.yml** configuration. All scripts have the following structure:
     - **name**
     - **cache**
     - **expirationTimeMsec**
@@ -2700,7 +2700,8 @@
 Major properties are:
 - **attributes**             - list of attributes that will be retrieved from LDAP (-name, -cn, -uid, -member, etc);
 - **filter**               - the filter, based on which the object will be retrieved from LDAP;
-- **searchResultProcessor**    - optional. If only LDAP object attributes retrieving is required, this property should be empty. For example, “userLookup” script only retrieves list of "attributes". Otherwise, code customization (like user enrichment, user to groups matching, etc.) should be added into sub-properties below:
+- **searchResultProcessor**    - optional. If only retrieving LDAP object attributes is required, this property should 
+  be empty. For example, the “userLookup” script only retrieves the list of "attributes". Otherwise, code customization 
+  (like user enrichment, user-to-group matching, etc.) should be added into the sub-properties below:
   - **language**                - the script language - "python" or "JavaScript"
   - **code**                    - the script code.
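+
+Below is a hedged sketch of a single script entry in **security.yml**, assuming the structure above; the attribute list, filter and the Python snippet (including the "user" and "searchResult" names) are illustrative, not the shipped configuration:
+
+    - name: userInfo
+      cache: true
+      expirationTimeMsec: 600000
+      attributes:
+        - cn
+        - uid
+        - mail
+      base: ou=users,dc=example,dc=com
+      filter: "(&(objectClass=person)(uid=%uid%))"
+      searchResultProcessor:
+        language: python
+        code: |
+          # illustrative enrichment step: copy the "cn" value found in LDAP onto the user object
+          user.setName(searchResult['cn'])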
 
@@ -2728,14 +2729,16 @@
     base: ou=users,ou=alxn,dc=alexion,dc=cloud
     filter: "(&(objectCategory=person)(objectClass=user)(mail=%mail%))"
 
-In the example above, the user login passed from GUI is a mail (**ldapSearchAttribute: mail**) and based on the filer (**filter: "(&(objectCategory=person)(objectClass=user)(mail=%mail%))")** so, the service would search user by its **“mail”**.
+In the example above, the user login passed from the GUI is a mail (**ldapSearchAttribute: mail**) and, based on the filter
+(**filter: "(&(objectCategory=person)(objectClass=user)(mail=%mail%))"**), the service searches for the user by its **"mail"**.
 If corresponding users are found - the script will return additional user's attributes:
   - cn
   - gidNumber
   - mail
   - memberOf
 
-User`s authentication into LDAP would be done for DN with following template **ldapBindTemplate: 'cn=%s,ou=users,ou=alxn,dc=alexion,dc=cloud'**, where CN is attribute retrieved by  **“userLookUp”** script.
+User's authentication into LDAP is done for a DN with the following template **ldapBindTemplate: 'cn=%s,ou=users,ou=alxn,dc=alexion,dc=cloud'**, where CN is the attribute retrieved by the **"userLookUp"** script.
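+
+For example, if the **userLookUp** script retrieved **cn=John Doe** for the logged-in mail, the DN used for binding would be (the name is illustrative):
+
+    cn=John Doe,ou=users,ou=alxn,dc=alexion,dc=cloud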
 
 ## Azure OAuth2 Authentication <a name="Azure_OAuth2_Authentication"></a>
 DLab supports OAuth2 authentication that is configured automatically in Security Service and Self Service after DLab deployment.
@@ -2758,18 +2761,22 @@
         maxSessionDurabilityMilliseconds: 288000000
 
 where:
-- **useLdap** - defines if LDAP authentication is enabled(true/false). If false Azure OAuth2 takes place with configuration properties below
+- **useLdap** - defines if LDAP authentication is enabled (true/false). If false, Azure OAuth2 takes place with the
+  configuration properties below
 - **tenant** - tenant id of your company
 - **authority** - Microsoft login endpoint
 - **clientId** - id of the application that users log in through
 - **redirectUrl** - redirect URL to the DLab application after trying to log in to Azure using OAuth2
 - **responseMode** - defines how Azure sends the authorization code or error information to DLab during the login procedure
 - **prompt** - defines the kind of prompt during OAuth2 login
-- **silent** - defines if DLab tries to log in user without interaction(true/false), if false DLab tries to login user with configured prompt
+- **silent** - defines if DLab tries to log the user in without interaction (true/false); if false, DLab tries to log the user in
+  with the configured prompt
 - **loginPage** - start page of DLab application
-- **maxSessionDurabilityMilliseconds** - max user session durability. user will be asked to login after this period of time and when he/she creates ot starts notebook/cluster. This operation is needed to update refresh_token that is used by notebooks to access Data Lake Store
+- **maxSessionDurabilityMilliseconds** - max user session durability. The user will be asked to log in after this period
+  of time and when he/she creates or starts a notebook/cluster. This operation is needed to update the refresh_token that is used by notebooks to access Data Lake Store
 
-To get more info about *responseMode*, *prompt* parameters please visit [Authorize access to web applications using OAuth 2.0 and Azure Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code)
+To get more info about the *responseMode* and *prompt* parameters, please visit
+[Authorize access to web applications using OAuth 2.0 and Azure Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code)
 
 
 ### Azure OAuth2 Security Service configuration
@@ -2785,11 +2792,16 @@
         managementApiAuthFile: /dlab/keys/azure_authentication.json
 
 where:
-- **useLdap** - defines if LDAP authentication is enabled(true/false). If false Azure OAuth2 takes place with configuration properties below
+- **useLdap** - defines if LDAP authentication is enabled (true/false). If false, Azure OAuth2 takes place with the
+  configuration properties below
 - **tenant** - tenant id of your company
 - **authority** - Microsoft login endpoint
 - **clientId** - id of the application that users log in through
 - **redirectUrl** - redirect URL to the DLab application after trying to log in to Azure using OAuth2
-- **validatePermissionScope** - defines(true/false) if user's permissions should be validated to resource that is provided in permissionScope parameter. User will be logged in onlu in case he/she has any role in resource IAM described with permissionScope parameter
-- **permissionScope** - describes Azure resource where user should have any role to pass authentication. If user has no role in resource IAM he/she will not be logged in  
-- **managementApiAuthFile** - authentication file that is used to query Microsoft Graph API to check user roles in resource described in permissionScope  
\ No newline at end of file
+- **validatePermissionScope** - defines (true/false) if the user's permissions should be validated against the resource that is
+  provided in the permissionScope parameter. The user will be logged in only in case he/she has any role in the resource IAM
+  described with the permissionScope parameter
+- **permissionScope** - describes the Azure resource where the user should have any role to pass authentication. If the user has no
+  role in the resource IAM, he/she will not be logged in
+- **managementApiAuthFile** - authentication file that is used to query the Microsoft Graph API to check user roles in the
+  resource described in permissionScope
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index cf4e3a3..2b43591 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,60 +1,69 @@
 # DLab is Self-service, Fail-safe Exploratory Environment for Collaborative Data Science Workflow
 
-## New features in v2.2
+## New features in v2.3
+
 **All Cloud platforms:**
-- added concept of Projects into DLab. Now users can unite under Projects and collaborate
-- for ease of use we've added web terminal for all DLab Notebooks
-- updated versions of installed software:
-	* angular 8.2.7
+- Added support for multi-Cloud orchestration for AWS, Azure and GCP. Now, a single DLab instance can connect to the above Clouds, by means of a respective set of APIs deployed on cloud endpoints;
+- Added JupyterLab v.0.35.6 template
+- Updated versions of installed software:
+  - Jupyter notebook v.6.0.2;
+  - Apache Zeppelin v.0.8.2;
+  - RStudio v.1.2.5033;
+  - Apache Spark v.2.4.4 for standalone cluster;
+
+**AWS:**
+- Added support of new version of Data Engine Service (EMR) v.5.28.0;
 
 **GCP:**
-- added billing report to monitor Cloud resources usage into DLab, including ability to manage billing quotas
-- updated versions of installed software:
-	* Dataproc 1.3
+- Added support of new version of Data Engine Service (Dataproc) v.1.4;
+- Added new template Superset v.0.35.1;
 
-## Improvements in v2.2
+## Improvements in v2.3
 **All Cloud platforms:**
-- implemented login via KeyCloak to support integration with multiple SAML and OAUTH2 identity providers
-- added DLab version into WebUI
-- augmented ‘Environment management’ page
-- added possibility to tag Notebook from UI
-- added possibility to terminate computational resources via scheduler
+- Grouped project management actions in single Edit project menu for ease of use;
+- Introduced new "project admin" role;
+- SSO now also works for Notebooks;
+- Implemented ability to filter installed libraries;
+- Added possibility to sort by project/user/charges in 'Billing report' page;
+- Added test option for remote endpoint;
 
-**GCP:**
-- added possibility to create Notebook/Data Engine from an AMI image
-
-**AWS and GCP:**
-- UnGit tool now allows working with remote repositories over ssh
-- implemented possibility to view Data Engine Service version on UI after creation
-
-## Bug fixes in v2.2
+## Bug fixes in v2.3
 **All Cloud platforms:**
-- fixed  sparklyr library (r package) installation on RStudio, RStudio with TensorFlow notebooks
+- Fixed a bug when the Notebook name had to be unique per project across different users, since it was impossible to operate a Notebook with the same name after the first instance creation;
+- Fixed a bug when administrator could not stop/terminate Notebook/computational resources created by another user;
+- Fixed a bug when shell interpreter was not showing up for Apache Zeppelin;
+- Fixed a bug when scheduler by start time was not triggered for Data Engine;
+- Fixed a bug when it was possible to start Notebook if project quota was exceeded;
+- Fixed a bug when scheduler for stopping was not triggered after total quota depletion;
 
-**GCP:**
-- fixed a bug when Data Engine creation fails for DeepLearning template
-- fixed a bug when Jupyter does not start successfully after Data Engine Service creation (create Jupyter -> create Data Engine -> stop Jupyter -> Jupyter fails)
-- fixed a bug when DeepLearning creation was failing
-
-## Known issues in v2.2
-**All Cloud platforms:**
-- Notebook name should be unique per project for different users in another case it is impossible to operate Notebook with the same name after the first instance creation
+**AWS:**
+- Fixed a bug when Notebook image/snapshot were still available after SSN termination;
 
 **Microsoft Azure:**
-- DLab deployment  is unavailable if Data Lake is enabled
-- custom image creation from Notebook fails and deletes existed Notebook
+- Fixed a bug when custom image creation from Notebook failed and deleted the existing Notebook of another user;
+- Fixed a bug when detailed billing was not available;
+- Fixed a bug when spark reconfiguration failed on Data Engine;
+- Fixed a bug when billing data was not available after calendar filter usage;
 
-**Refer to the following link in order to view the other major/minor issues in v2.2:**
+## Known issues in v2.3
+**GCP:**
+- SSO is not available for Superset;
 
-[Apache DLab: known issues](https://issues.apache.org/jira/issues/?filter=12347602 "Apache DLab: known issues")
-
-## Known issues caused by cloud provider limitations in v2.2
 **Microsoft Azure:**
-- resource name length should not exceed 80 chars
-- TensorFlow templates are not supported for Red Hat Enterprise Linux
-- low priority Virtual Machines are not supported yet
+- Notebook creation fails on RedHat;
+- Web terminal is not working for Notebooks on remote endpoints only;
+
+Refer to the following link in order to view the other major/minor issues in v2.3:
+
+[Apache DLab: known issues](https://issues.apache.org/jira/issues/?filter=12348876 "Apache DLab: known issues")
+
+## Known issues caused by cloud provider limitations in v2.3
+**Microsoft Azure:**
+- Resource name length should not exceed 80 chars;
+- TensorFlow templates are not supported for RedHat Enterprise Linux;
+- Low priority Virtual Machines are not supported yet;
 
 **GCP:**
-- resource name length should not exceed 64 chars
-- billing data is not available
-- **NOTE:** DLab has not been tested on GCP for Red Hat Enterprise Linux
+- Resource name length should not exceed 64 chars;
+- NOTE: DLab has not been tested on GCP for RedHat Enterprise Linux;
diff --git a/USER_GUIDE.md b/USER_GUIDE.md
index d821a93..78876fc 100644
--- a/USER_GUIDE.md
+++ b/USER_GUIDE.md
@@ -10,7 +10,7 @@
 
 [Login](#login)
 
-[Setup a Gateway/Edge node](#setup_edge_node)
+[Create project](#setup_edge_node)
 
 [Setting up analytical environment and managing computational power](#setup_environmen)
 
@@ -26,35 +26,34 @@
 
 &nbsp; &nbsp; &nbsp; &nbsp; [Deploy Computational resource](#computational_deploy)
 
-&nbsp; &nbsp; &nbsp; &nbsp; [Stop Apache Spark cluster](#spark_stop)
+&nbsp; &nbsp; &nbsp; &nbsp; [Stop Standalone Apache Spark cluster](#spark_stop)
 
 &nbsp; &nbsp; &nbsp; &nbsp; [Terminate Computational resource](#computational_terminate)
 
+&nbsp; &nbsp; &nbsp; &nbsp; [Scheduler](#scheduler)
+
 &nbsp; &nbsp; &nbsp; &nbsp; [Collaboration space](#collaboration_space)
 
 &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Manage Git credentials](#git_creds)
 
 &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Git UI tool (ungit)](#git_ui)
 
-[DLab Health Status Page](#health_page)
+[Administration](#administration)
 
-&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Backup](#backup)
+&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Manage roles](#manage_roles)
 
-&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Manage environment](#manage_environment)
+&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Project management](#project_management)
 
-&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Manage roles](#manage_roles)
+&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Environment management](#environment_management)
 
-&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [SSN monitor](#ssn_monitor)
+&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Multiple Cloud endpoints](#multiple_cloud_endpoints)
+
+&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; [Manage DLab quotas](#manage_dlab_quotas)
 
 [DLab billing report](#billing_page)
 
-[DLab Environment Management Page](#environment_management)
-
 [Web UI filters](#filter)
 
-[Scheduler](#scheduler)
-
-[Key reupload](#key_reupload)
 
 ---------
 # Login <a name="login"></a>
@@ -65,6 +64,9 @@
 
 -   OpenLdap;
 -   Cloud Identity and Access Management service user validation;
+-   KeyCloak integration for seamless SSO experience *;
+
+    * NOTE: in case DLab has been installed and configured to use SSO, please click on "Login with SSO" and use your corporate credentials
 
 | Login error messages               | Reason                                                                           |
 |------------------------------------|----------------------------------------------------------------------------------|
@@ -76,7 +78,7 @@
 
 To stop working with DLab - click on Log Out link at the top right corner of DLab.
 
-After login user will see warning in case of exceeding quota or close to this limit.
+After login, the user sees a warning if the quota has been exceeded or is close to its limit.
 
 <p align="center" class="facebox-popup"> 
     <img src="doc/exceeded quota.png" alt="Exceeded quota" width="400">
@@ -87,38 +89,35 @@
 </p>
 
 ----------------------------------
-# Setup a Gateway/Edge node <a name="setup_edge_node"></a>
+# Create project <a name="setup_edge_node"></a>
 
-When you log into DLab Web Application, the first thing you will have to setup is a Gateway Node, or an “Edge” Node.
+When you log into DLab Web interface, the first thing you need to do is to create a new project.
 
-To do this click on “Upload” button on “Create initial infrastructure”, select your personal public key and hit “Create” button or click on "Generate" button on “Create initial infrastructure” and save your private key.
+To do this click on the “Upload” button on the “Projects” page, select your personal public key (or click on the "Generate" button), choose an endpoint and a group, enable or disable 'Use shared image' and hit the “Create” button. Do not forget to save your private key.
 
 <p align="center" class="facebox-popup"> 
-    <img src="doc/upload_or_generate_user_key.png" alt="Upload or generate user key" width="400">
+    <img src="doc/upload_or_generate_user_key.png" alt="Upload or generate user key" width="100%">
 </p>
 
-Please note that you need to have a key pair combination (public and private key) to work with DLab. To figure out how to create public and private key, please click on “Where can I get public key?” on “Create initial infrastructure” dialog. DLab build-in wiki page will guide Windows, MasOS and Linux on how to generate SSH key pairs quickly.
+Please note that you need to have a key pair combination (public and private key) to work with DLab. To figure out how to create public and private keys, please click on “Where can I get public key?” on the “Projects” page. The DLab built-in wiki page guides Windows, MacOS and Linux users on how to generate SSH key pairs quickly.
 
-After you hit "Create" or "Generate" button, creation of Edge node will start. This process is a one-time operation for each Data Scientist and it might take up-to 10 minutes for DLab to setup initial infrastructure for you. During this process, you will see following popup in your browser:
+Creation of a Project starts after hitting the "Create" button. This process is a one-time operation for each Data Scientist and it might take up to 10 minutes for DLab to set up the initial infrastructure for you. During this process the project is in status "Creating".
 
-<p align="center"> 
-    <img src="doc/loading_key.png" alt="Loading user key" width="350">
-</p>
+'Use shared image' enabled means that an image of a particular notebook type is created when the first notebook of that type is created in DLab. This image is available to all DLab users and is used for provisioning further notebooks of the same type within DLab. 'Use shared image' disabled means that the image is instead available only to users within the same project.
 
-As soon as an Edge node is created, Data Scientist will see a blank “List of Resources” page. The message “To start working, please create new environment” will be displayed:
+As soon as the Project is created, the Data Scientist can create a notebook server on the “List of Resources” page. The message “To start working, please create new environment” appears on the “List of Resources” page:
 
 ![Main page](doc/main_page.png)
 
 ---------------------------------------------------------------------------------------
 # Setting up analytical environment and managing computational power <a name="setup_environmen"></a>
 
-----------------------
+
 ## Create notebook server <a name="notebook_create"></a>
 
 To create new analytical environment from “List of Resources” page click on "Create new" button.
 
-“Create analytical tool” popup will show-up. Data Scientist can choose a preferable analytical tool to be setup. Adding new analytical tools is supported by architecture, so you can expect new templates to show up in upcoming releases.
-
+The "Create analytical tool" popup shows up. Data Scientist can choose the preferred project, endpoint and analytical tool. Adding new analytical toolset is supported by architecture, so you can expect new templates to show up in upcoming releases.
 Currently by means of DLab, Data Scientists can select between any of the following templates:
 
 -   Jupyter
@@ -127,6 +126,8 @@
 -   RStudio with TensorFlow
 -   Jupyter with TensorFlow
 -   Deep Learning (Jupyter + MXNet, Caffe, Caffe2, TensorFlow, CNTK, Theano, Torch and Keras)
+-   JupyterLab
+-   Superset (implemented on GCP)
 
 <p align="center"> 
     <img src="doc/notebook_create.png" alt="Create notebook" width="574">
@@ -134,9 +135,9 @@
 
 After specifying desired template, you should fill in the “Name” and “Instance shape”.
 
-Name field – is just for visual differentiation between analytical tools on “List of resources” dashboard.
+Keep in mind that the "Name" field is just for visual differentiation between analytical tools on the “List of resources” dashboard.
 
-Instance shape dropdown, contains configurable list of shapes, which should be chosen depending on the type of analytical work to be performed. Following groups of instance shapes will be showing up with default setup configuration:
+The "Instance shape" dropdown contains a configurable list of shapes, which should be chosen depending on the type of analytical work to be performed. The following groups of instance shapes show up with the default setup configuration:
 
 <p align="center"> 
     <img src="doc/select_shape.png" alt="Select shape" width="250">
@@ -144,25 +145,29 @@
 
 These groups have T-Shirt based shapes (configurable), that can help Data Scientist to either save money\* and leverage not very powerful shapes (for working with relatively small datasets), or that could boost the performance of analytics by selecting more powerful instance shape.
 
-\* Please refer to official documentation from Amazon that will help you understand what [instance shapes](https://aws.amazon.com/ec2/instance-types/) would be most preferable in your particular DLAB setup. Also, you can use [AWS calculator](https://calculator.s3.amazonaws.com/index.html) to roughly estimate the cost of your environment.
+\* Please refer to official documentation from Amazon, which helps you understand which [instance shapes](https://aws.amazon.com/ec2/instance-types/) are most preferable in your particular DLab setup. Also, you can use the [AWS calculator](https://calculator.s3.amazonaws.com/index.html) to roughly estimate the cost of your environment.
 
-You can override the default configurations for local spark. The configuration object is referenced as a JSON file. To tune spark configuration check off "Spark configurations" check box and insert JSON format in text box.
+\* Please refer to official documentation from GCP, which helps you understand which [instance shapes](https://cloud.google.com/compute/docs/machine-types) are most preferable in your particular DLab setup. Also, you can use the [GCP calculator](https://cloud.google.com/products/calculator) to roughly estimate the cost of your environment.
 
-After you Select the template, fill in the Name and choose needed instance shape - you need to click on "Create" button for your instance to start creating. Corresponding record will show up in your dashboard:
+\* Please refer to official documentation from Microsoft Azure, which helps you understand which [virtual machine shapes](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/series/) are most preferable in your particular DLab setup. Also, you can use the [Microsoft Azure calculator](https://azure.microsoft.com/en-us/pricing/calculator/?&ef_id=EAIaIQobChMItPmK5uj-6AIVj-iaCh0BFgVYEAAYASAAEgJ4KfD_BwE:G:s&OCID=AID2000606_SEM_UOMYUjFz&MarinID=UOMYUjFz_364338000380_microsoft%20azure%20calculator_e_c__76882726955_kwd-300666827690&lnkd=Google_Azure_Brand&dclid=CLC65Ojo_ugCFUWEsgodm18GNA) to roughly estimate the cost of your environment.
+
+You can override the default configuration of local Spark. The configuration object is referenced as a JSON file. To tune the Spark configuration, check off the "Spark configurations" checkbox and insert JSON into the text box.
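+
+A minimal sketch of such a JSON object, assuming the same classification-style format that is used for cluster configuration (the property values are illustrative, not recommendations):
+
+    [
+        {
+            "Classification": "spark-defaults",
+            "Properties": {
+                "spark.executor.memory": "2g",
+                "spark.driver.memory": "1g"
+            }
+        }
+    ]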
+
+After you select the template, fill in the name and specify the desired instance shape, click on the "Create" button for your analytical toolset to be created. A corresponding record shows up in your dashboard:
 
 ![Dashboard](doc/main_page2.png)
 
-As soon as notebook server is created, its status will change to Running:
+As soon as the notebook server is created, its status changes to "Running":
 
 ![Running notebook](doc/main_page3.png)
 
-When you click on the name of your Analytical tool in the dashboard – analytical tool popup will show up:
+When you click on the name of your Analytical tool in the dashboard – the analytical tool popup shows up:
 
 <p align="center"> 
     <img src="doc/notebook_info.png" alt="Notebook info" width="574">
 </p>
 
-In the header you will see version of analytical tool, its status and shape.
+In the header you see the version of the analytical tool, its status and shape.
 
 In the body of the dialog:
 
@@ -170,60 +175,54 @@
 -   Analytical tool URL
 -   Git UI tool (ungit)
 -   Shared bucket for all users
--   Bucket that has been provisioned for your needs
+-   Project bucket for project members
 
-To access analytical tool Web UI you proceed with one of the options:
-
--   use direct URL's to access notebooks (your access will be established via reverse proxy, so you don't need to have Edge node tunnel up and running)
--   SOCKS proxy based URL's to access notebooks (via tunnel to Edge node)
-
-If you use direct urls you don't need to open tunnel for Edge node and set SOCKS proxy.
-If you use indirect urls you need to configure SOCKS proxy and open tunnel for Edge node. Please follow the steps described on “Read instruction how to create the tunnel” page to configure SOCKS proxy for Windows/MAC/Linux machines. “Read instruction how to create the tunnel” is available on DLab notebook popup.
+To access the analytical tool Web UI you use direct URLs (your access is established via a reverse proxy, so you don't need to have an Edge node tunnel up and running).
 
 ### Manage libraries <a name="manage_libraries"></a>
 
-On every analytical tool instance you can install additional libraries by clicking on gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the Actions column for a needed Notebook and hit Manage libraries:
+On every analytical tool instance you can install additional libraries by clicking on gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the "Actions" column for a needed Notebook and hit "Manage libraries":
 
 <p align="center"> 
     <img src="doc/notebook_menu_manage_libraries.png" alt="Notebook manage_libraries" width="150">
 </p>
 
-After clicking you will see the window with 3 fields:
--   Field for selecting an active resource to install libraries on
+After clicking you see a window with 3 fields:
+-   Field for selecting an active resource to install libraries on
 -   Field for selecting group of packages (apt/yum, Python 2, Python 3, R, Java, Others)
 -   Field for searching available packages with an autocomplete function (except for Java). A Java library should be entered in the following format: "groupID:artifactID:versionID", e.g. "org.apache.commons:commons-lang3:3.9"
 
 ![Install libraries dialog](doc/install_libs_form.png)
 
-You need to wait for a while after resource choosing till list of all available libraries will be received.
+You need to wait a while after choosing the resource until the list of all available libraries is received.
 
 ![Libraries list loading](doc/notebook_list_libs.png)
 
-**Note:** apt or yum packages depends on your DLab OS family.
+**Note:** Apt or yum packages depend on your DLab OS family.
 
 **Note:** In the Others group you can find other Python (2/3) packages, which have no version classifiers.
 
 ![Resource select_lib](doc/notebook_select_lib.png)
 
-After selecting library, you can see it on the right and could delete in from this list before installing.
+After selecting a library, you can see it in the middle of the window and can delete it from this list before installation.
 
 ![Resource selected_lib](doc/notebook_selected_libs.png)
 
-After clicking on "Install" button you will see process of installation with appropriate status.
+After clicking on "Install" button you see process of installation with appropriate status.
 
 ![Resources libs_status](doc/notebook_libs_status.png)
 
-**Note:** If package can't be installed you will see "Failed" in status column and button to retry installation.
+**Note:** If a package can't be installed you see "Failed" in the status column and a button to retry the installation.
 
 ### Create image <a name="create_image"></a>
 
-Out of each analytical tool instance you can create an AMI image (notebook should be in Running status), including all libraries, which have been installed on it. You can use that AMI to speed-up provisioining of further analytical tool, if you would like to re-use existing configuration. To create an AMI click on a gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the Actions menu for a needed Notebook and hit "Create AMI":
+Out of each analytical tool instance you can create an AMI image (the notebook should be in Running status), including all libraries which have been installed on it. You can use that AMI to speed up provisioning of further analytical tools, if you want to re-use an existing configuration. To create an AMI click on a gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the "Actions" menu for a needed Notebook and hit "Create AMI":
 
 <p align="center"> 
     <img src="doc/notebook_menu_create_ami.png" alt="Notebook create_ami" width="150">
 </p>
 
-On Create AMI popup you will be asked to fill in:
+On "Create AMI" popup you should fill:
 -   text box for an AMI name (mandatory)
 -   text box for an AMI description (optional)
 
@@ -231,11 +230,11 @@
     <img src="doc/create_ami.png" alt="Create AMI" width="480">
 </p>
 
-After clicking on "Assign" button the Notebook status will change to Creating AMI. Once an image is created the Notebook status changes back to Running.
+After clicking on "Create" button the Notebook status changes to "Creating image". Once an image is created the Notebook status changes back to "Running".
 
-To create new analytical environment from custom image click "Create new" button on “List of Resources” page. 
+To create a new analytical environment from a custom image click on the "Create new" button on the “List of Resources” page.
 
-“Create analytical tool” popup will show-up. Choose a template of a Notebook for which the custom image is created:
+The “Create analytical tool” popup shows up. Choose the project, endpoint and template of the Notebook for which the custom image has been created:
 
 <p align="center"> 
     <img src="doc/create_notebook_from_ami.png" alt="Create notebook from AMI" width="560">
@@ -243,56 +242,59 @@
 
 Before clicking "Create" button you should choose the image from "Select AMI" and fill in the "Name" and "Instance shape".
 
+**NOTE:** This functionality is implemented for AWS and Azure.
+
 --------------------------
 ## Stop Notebook server <a name="notebook_stop"></a>
 
-Once you have stopped working with an analytical tool and you would like to release cloud resources for the sake of the costs, you might want to Stop the notebook. You will be able to Start the notebook again after a while and proceed with your analytics.
+Once you have stopped working with an analytical tool and you need to release Cloud resources for the sake of costs, you might want to stop the notebook. You are able to start the notebook later and proceed with your analytical work.
 
-To Stop the Notebook click on a gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the Actions column for a needed Notebook and hit Stop:
+To stop the Notebook click on a gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the "Actions" column for a needed Notebook and hit "Stop":
 
 <p align="center"> 
     <img src="doc/notebook_menu_stop.png" alt="Notebook stopping" width="150">
 </p>
 
-Hit OK in confirmation popup.
+Hit "OK" in confirmation popup.
 
-**NOTE:** if any Computational resources except for Spark cluster have been connected to your notebook server – they will be automatically terminated if you stop the notebook and Spark cluster will be automatically stopped.
+**NOTE:** A connected Data Engine Service becomes Terminated, while a connected Data Engine (Standalone Apache Spark cluster), if any, becomes Stopped.
 
 <p align="center"> 
     <img src="doc/notebook_stop_confirm.png" alt="Notebook stop confirm" width="400">
 </p>
 
-After you confirm you intent to Stop the notebook - the status will be changed to Stopping and will become Stopped in a while. Spark cluster status will be changed to Stopped and other Computational resource status  will be changed to Terminated.
+After you confirm your intent to stop the notebook - the status changes to "Stopping" and later becomes "Stopped". 
 
 --------------------------------
 ## Terminate Notebook server <a name="notebook_terminate"></a>
 
-Once you have finished working with an analytical tool and you would like to release cloud resources for the sake of the costs, you might want to Terminate the notebook. You will not be able to Start the notebook which has been Terminated. Instead, you will have to create new Notebook server if you will need to proceed your analytical activities.
+Once you have finished working with an analytical tool and you don't need cloud resources anymore, for the sake of costs we recommend terminating the notebook. You are not able to start a notebook which has been terminated. Instead, you have to create a new Notebook if you need to proceed with your analytical activities.
 
-To Terminate the Notebook click on a gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the Actions column for a needed Notebook and hit Terminate:
+**NOTE:** Make sure you back up your data (if it exists on the Notebook) and playbooks before termination.
 
-**NOTE:** if any Computational resources have been linked to your notebook server – they will be automatically terminated if you stop the notebook.
+To terminate the Notebook click on a gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the "Actions" column for a needed Notebook and hit "Terminate":
 
-Confirm termination of the notebook and afterward notebook status will be changed to **Terminating**:
+**NOTE:** If any Computational resources have been linked to your notebook server – they are automatically terminated if you terminate the notebook.
+
+Confirm termination of the notebook and afterwards the notebook status changes to "Terminating":
 
 ![Notebook terminating](doc/notebook_terminating.png)
 
-Once corresponding instances are terminated on cloud, status will finally
-change to Terminated:
+Once the corresponding instances become terminated in the Cloud console, the status finally changes to "Terminated":
 
 ![Notebook terminated](doc/notebook_terminated.png)
 
 ---------------
 ## Deploy Computational resource <a name="computational_deploy"></a>
 
-After deploying Notebook node, you can deploy Computational resource and it will be automatically linked with your Notebook server. Computational resource is a managed cluster platform, that simplifies running big data frameworks, such as Apache Hadoop and Apache Spark on cloud to process and analyze vast amounts of data. Adding Computational resource is not mandatory and is needed in case computational resources are required for job execution.
+After deploying a Notebook node, you can deploy a Computational resource and it is automatically linked with your Notebook server. A Computational resource is a managed cluster platform that simplifies running big data frameworks, such as Apache Hadoop and Apache Spark, on the cloud to process and analyze vast amounts of data. Adding a Computational resource is not mandatory and is only needed if computational resources are required for job execution.
 
-On “Create Computational Resource” popup you will have to choose Computational resource version (configurable) and specify alias for it. To setup a cluster that meets your needs – you will have to define:
+On the “Create Computational Resource” popup you have to choose the Computational resource version (configurable) and specify an alias for it. To set up a cluster that meets your needs – you have to define:
 
 -   Total number of instances (min 2 and max 14, configurable);
 -   Master and Slave instance shapes (list is configurable and supports all available cloud instance shapes, supported in your cloud region);
 
-Also, if you would like to save some costs for your Computational resource you can create it based on [spot instances](https://aws.amazon.com/ec2/spot/), which are often available at a discount price (this functionality is only available for AWS cloud):
+Also, if you want to save some costs for your Computational resource, you can create it based on [spot instances](https://aws.amazon.com/ec2/spot/) (this functionality is for AWS cloud) or [preemptible instances](https://cloud.google.com/compute/docs/instances/preemptible) (this functionality is for GCP), which are often available at a discount price:
 
 -   Select Spot Instance checkbox;
 -   Specify preferable bid for your spot instance in % (between 20 and 90, configurable).
@@ -304,41 +306,48 @@
     <img src="doc/emr_create.png" alt="Create Computational resource on AWS" width="760">
 </p>
 
-You can override the default configurations for applications by supplying a configuration object for applications when you create a cluster (this functionality is only available for Amazon EMR cluster ). The configuration object is referenced as a JSON file.
+You can override the default configurations for applications by supplying a configuration object for applications when you create a cluster (this functionality is only available for Amazon EMR cluster). The configuration object is referenced as a JSON file.
 To tune the computational resource configuration, check off the "Cluster configurations" checkbox and insert JSON into the text box:
 
 <p align="center"> 
     <img src="doc/emr_create_configuration.png" alt="Create Custom Computational resource on AWS" width="760">
 </p>
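+
+For reference, a minimal sketch of a configuration object in the Amazon EMR classification format; the chosen classifications and values are illustrative assumptions, not recommendations:
+
+    [
+        {
+            "Classification": "spark-defaults",
+            "Properties": {
+                "spark.executor.memory": "4g"
+            }
+        },
+        {
+            "Classification": "yarn-site",
+            "Properties": {
+                "yarn.nodemanager.vmem-check-enabled": "false"
+            }
+        }
+    ]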
 
+This picture shows menu for creating Computational resource for GCP:
+<p align="center"> 
+    <img src="doc/dataproc_create.png" alt="Create Computational resource on GCP" width="760">
+</p>
+
+To create a Data Engine Service (Dataproc) with preemptible instances, check off 'preemptible node count'. You can add from 1 to 11 preemptible instances.
+
 This picture shows menu for creating Computational resource for Azure:
 <p align="center"> 
     <img src="doc/dataengine_creating_menu.png" alt="Create Computational resource on Azure" width="760">
 </p>
 
-If you click on "Create" button Computational resource creation will kick off. You will see corresponding record on DLab Web UI in status **Creating**:
+If you click on "Create" button Computational resource creation kicks off. You see corresponding record on DLab Web UI in status "Creating":
 
 ![Creating Computational resource](doc/emr_creating.png)
 
-Once Computational resources are provisioned, their status will be changed to **Running**.
+Once Computational resources are provisioned, their status changes to "Running".
 
-Clicking on Computational resource name in DLab dashboard will open Computational resource details popup:
+After clicking on the Computational resource name in the DLab dashboard you see the Computational resource details popup:
 
 <p align="center"> 
     <img src="doc/emr_info.png" alt="Computational resource info" width="480">
 </p>
 
-Also you can go to computational resource master UI via link "Apache Spark Master' or "EMR Master" (this functionality is only available for AWS cloud).
+Also you can go to the computational resource master UI via the links "Spark job tracker URL", "EMR job tracker URL" or "Dataproc job tracker URL".
 
 Since the Computational resource is up and running - you are now able to leverage the cluster's computational power to run your analytical jobs.
 
 To do that open any of the analytical tools and select proper kernel/interpreter:
 
-**Jupyter** – goto Kernel and choose preferable interpreter between local and Computational resource ones. Currently we have added support of Python 2/3, Spark, Scala, R into Jupyter.
+**Jupyter** – go to Kernel and choose the preferable interpreter between the local and Computational resource ones. Currently we have added support of Python 2/3, Spark, Scala, R in Jupyter.
 
 ![Jupiter](doc/jupiter.png)
 
-**Zeppelin** – goto Interpreter Biding menu and switch between local and Computational resource there. Once needed interpreter is selected click on Save.
+**Zeppelin** – go to the Interpreter Binding menu and switch between local and Computational resource interpreters there. Once the needed interpreter is selected click on "Save".
 
 ![Zeppelin](doc/zeppelin.png)
 
@@ -354,11 +363,11 @@
 ![RStudio](doc/rstudio.png)
 
 ---------------
-## Stop  Apache Spark cluster <a name="spark_stop"></a>
+## Stop Standalone Apache Spark cluster <a name="spark_stop"></a>
 
-Once you have stopped working with a spark cluster and you would like to release cloud resources for the sake of the costs, you might want to Stop Apache Spark cluster. You will be able to Start apache Spark cluster again after a while and proceed with your analytics.
+Once you have stopped working with a Standalone Apache Spark cluster (Data Engine) and you need to release cloud resources for the sake of costs, you might want to stop the Standalone Apache Spark cluster. You are able to start the Standalone Apache Spark cluster again after a while and proceed with your analytics.
 
-To Stop Apache Spark cluster click on <img src="doc/stop_icon.png" alt="stop" width="20"> button close to spark cluster alias.
+To stop Standalone Apache Spark cluster click on <img src="doc/stop_icon.png" alt="stop" width="20"> button close to Standalone Apache Spark cluster alias.
 
 Hit "YES" in confirmation popup.
 
@@ -366,48 +375,103 @@
     <img src="doc/spark_stop_confirm.png" alt="Spark stop confirm" width="400">
 </p>
 
-After you confirm your intent to Apache Spark cluster - the status will be changed to Stopping and will become Stopped in a while.
+After you confirm your intent to stop Standalone Apache Spark cluster - the status changes to "Stopping" and soon becomes "Stopped".
 
 ------------------
 ## Terminate Computational resource <a name="computational_terminate"></a>
 
-To release cluster computational resources click on <img src="doc/cross_icon.png" alt="cross" width="16"> button close to Computational resource alias. Confirm decommissioning of Computational resource by hitting Yes:
+To release computational resources click on <img src="doc/cross_icon.png" alt="cross" width="16"> button close to Computational resource alias. Confirm decommissioning of Computational resource by hitting "Yes":
 
 <p align="center"> 
     <img src="doc/emr_terminate_confirm.png" alt="Computational resource terminate confirm" width="400">
 </p>
 
-In a while Computational resource cluster will get **Terminated**. Corresponding cloud instances will also removed on cloud.
+In a while the Computational resource gets "Terminated". The corresponding cloud instances are also removed on the cloud.
+
+------------------
+## Scheduler <a name="scheduler"></a>
+
+The Scheduler component allows you to automatically schedule Start and Stop triggers for a Notebook/Computational resource, while
+for a Data Engine or Data Engine Service it can only trigger the Stop or Terminate action correspondingly. There are 2 types of scheduler:
+- Scheduler by time;
+- Scheduler by inactivity.
+
+Scheduler by time is for Notebook/Data Engine Start/Stop and for Data Engine/Data Engine Service termination.
+Scheduler by inactivity is for Notebook/Data Engine stopping.
+
+To create scheduler for a Notebook click on an <img src="doc/gear_icon.png" alt="gear" width="20"> icon in the "Actions" column for a needed Notebook and hit "Scheduler":
+
+<p align="center"> 
+    <img src="doc/notebook_menu_scheduler.png" alt="Notebook scheduler action" width="150">
+</p>
+
+A popup with the following fields shows up:
+
+- start/finish dates - date range when scheduler is active;
+- start/end time - time when notebook should be running;
+- timezone - your time zone;
+- repeat on - days when scheduler should be active;
+- possibility to synchronize notebook scheduler with computational schedulers;
+- possibility not to stop notebook in case of running job on Standalone Apache Spark cluster.
+
+<p align="center"> 
+    <img src="doc/notebook_scheduler.png" alt="Notebook scheduler" width="400">
+</p>
+
+If you want to stop the Notebook on exceeding idle time, you should enable "Scheduler by inactivity", fill in your inactivity period (in minutes) and click on the "Save" button. The Notebook is stopped upon exceeding the idle time value.
+
+<p align="center"> 
+    <img src="doc/scheduler_by_inactivity.png" alt="Scheduler by Inactivity.png" width="400">
+</p>
+
+Also, a scheduler can be configured for a Standalone Apache Spark cluster. To configure a scheduler for the Standalone Apache Spark cluster click on this icon <img src="doc/icon_scheduler_computational.png" alt="scheduler_computational" width="16">:
+
+<p align="center"> 
+    <img src="doc/computational_scheduler_create.png" alt="Computational scheduler create" width="400">
+</p>
+
+There is a possibility to inherit scheduler start settings from notebook, if such scheduler is present:
+
+<p align="center"> 
+    <img src="doc/computational_scheduler.png" alt="Computational scheduler" width="400">
+</p>
+
+The Notebook/Standalone Apache Spark cluster is started/stopped automatically according to the scheduler settings.
+Please also note that if the notebook is configured to be stopped, all running data engines associated with it are stopped (for Standalone Apache Spark cluster) or terminated (for Data Engine Service) together with the notebook.
+
+After login the user is notified that corresponding resources are about to be stopped/terminated in some time.
+
+<p align="center"> 
+    <img src="doc/scheduler reminder.png" alt="Scheduler reminder" width="400">
+</p>
 
 --------------------------------
 ## Collaboration space <a name="collaboration_space"></a>
 
 ### Manage Git credentials <a name="git_creds"></a>
 
-To work with Git (pull, push) via UI tool (ungit) you could add multiple credentials in DLab UI, which will be set on all running instances with analytical tools.
+To work with Git (pull, push) via the UI tool (ungit) you can add multiple credentials in the DLab UI; they are set on all running instances with analytical tools.
 
-When you click on the button "Git credentials" – following popup will show up:
+When you click on the "Git credentials" button – the following popup shows up:
 
 <p align="center"> 
     <img src="doc/git_creds_window.png" alt="Git_creds_window" width="760">
 </p>
 
 In this window you need to add:
--   Your Git server hostname, without **http** or **https**, for example: gitlab.com, github.com, or your internal GitLab server, which can be deployed with DLab.
+-   Your Git server hostname, without **http** or **https**, for example: gitlab.com, github.com, bitbucket.com, or your internal Git server.
 -   Your Username and Email - used to display author of commit in git.
 -   Your Login and Password - for authorization into git server.
 
-**Note:** If you have GitLab server, which was deployed with DLab, you should use your LDAP credentials for access to GitLab.
+Once all fields are filled in and you click on "Assign" button, you see the list of all your Git credentials.
 
-Once all fields are filled in and you click on "Assign" button, you will see the list of all your Git credentials.
-
-Clicking on "Apply changes" button, your credentials will be sent to all running instances with analytical tools. It takes a few seconds for changes to be applied.
+Clicking on "Apply changes" button, your credentials are sent to all running instances with analytical tools. It takes a few seconds for changes to be applied.
 
 <p align="center"> 
     <img src="doc/git_creds_window2.png" alt="Git_creds_window1" width="760">
 </p>
 
-On this tab you can also edit your credentials (click on pen icon) or delete (click on bin icon).
+On this tab you can also edit your credentials (click on the pen icon <img src="doc/pen_icon.png" alt="pen" width="15">) or delete them (click on the bin icon <img src="doc/bin_icon.png" alt="bin" width="15">).
 
 ### Git UI tool (ungit) <a name="git_ui"></a>
 
@@ -417,7 +481,7 @@
     <img src="doc/notebook_info.png" alt="Git_ui_link" width="520">
 </p>
 
-Before start working with git repositories, you need to change working directory on the top of window to:
+Before starting to work with Git repositories, you need to change the working directory at the top of the window to:
 
 **/home/dlab-user/** or **/opt/zeppelin/notebook** for Zeppelin analytical tool and press Enter.
 
@@ -431,131 +495,154 @@
 
 ![Git_ui_ungit_work](doc/ungit_work.png)
 
-On the top of window in the red field UI show us changed or new files to commit. You can uncheck or add some files to gitignore.
+At the top of the window, in the red field, the UI shows changed or new files to commit. You can uncheck files or add some files to gitignore.
 
 **Note:** Git always checks your credentials. If this is your first commit after adding/changing credentials and nothing happens after clicking on the "Commit" button - just click on the "Commit" button again.
 
 On the right pane of the window you can also see buttons to fetch the last changes of the repository, add upstreams and switch between branches.
 
-To see all modified files - click on the "circle" button on the center:
+To see all modified files - click on the "Circle" button in the center:
 
 ![Git_ui_ungit_changes](doc/ungit_changes.png)
 
-After commit you will see your local version and remote repository. To push you changes - click on your current branch and press "Push" button.
+After the commit you see your local version and the remote repository. To push your changes - click on your current branch and press the "Push" button.
 
 ![Git_ui_ungit_push](doc/ungit_push.png)
 
-Also clicking on "circle" button you can uncommit or revert changes.
+Also clicking on "Circle" button you can uncommit or revert changes.
 
 --------------------------------
-# DLab Health Status Page <a name="health_page"></a>
+# Administration <a name="administration"></a>
 
-Health Status page is an administration page allowing users to start/stop/recreate gateway node. This might be useful in cases when someone manually deleted corresponding Edge node instance from cloud. This would have made DLab as an application corrupted in general. If any actions are manually done to Edge node instance directly via Cloud Web Console – those changes will be synchronized with DLab automatically and shortly Edge Node status will be updated in DLab.
+## Manage roles <a name="manage_roles"></a>
 
-To access Health status page either navigate to it via main menu:
-
-<p align="center"> 
-    <img src="doc/main_menu.png" alt="Main menu" width="250">
-</p>
-
-or by clicking on an icon close to logged in user name in the top right
-corner of the DLab:
-
--   green ![OK](doc/status_icon_ok.png), if Edge node status is Running;
--   red ![Error](doc/status_icon_error.png),if Edge node is Stopped or Terminated;
-
-![Health_status](doc/health_status.png)
-
-To Stop Edge Node please click on actions icon on Health Status page and hit "Stop".
-
-<p align="center"> 
-    <img src="doc/edge_stop.png" alt="EDGE stop" width="150">
-</p>
-
-Confirm you want to stop Edge node by clicking "Yes":
-
-<p align="center"> 
-    <img src="doc/edge_stop_confirm.png" alt="EDGE stop confirm" width="400">
-</p>
-
-In case you Edge node is Stopped or Terminated – you will have to Start or Recreate it correspondingly to proceed working with DLab. This can done as well via context actions menu.
-
-### Backup <a name="backup"></a>
-
-Administrator can use backup functionality. In order to do it click Backup button. "Backup options" popup will show-up. You can choose a preferable option to be backed up.
-
-<p align="center"> 
-    <img src="doc/backup_options.png" alt="Backup options" width="400">
-</p>
-
-Confirm you want to do backup by clicking "Apply".
-
-### Manage environment <a name="manage_environment"></a>
-
-Administrator can manage users environment clicking on Manage environment button. "Manage environment" popup will show-up. All users environments will be shown which at least one instance has Running status:
-
-<p align="center"> 
-    <img src="doc/manage_environment.png" alt="Manage environment" width="520">
-</p>
-
-If Administrator hit "Stop" icon <img src="doc/stop_icon_env.png" alt="stop" width="22"> all running instances except for dataengine service will be stopped and dataengine service will be terminated. User will be able to Start instances again except for dataengine service after a while and proceed with his analytics.
-
-If Administrator hit "Terminate" icon <img src="doc/terminate_icon_env.png" alt="terminate" width="22"> all running and stopped instances will be terminated. User will not be able to Start the inctance which has been Terminated. Instead, user will have to Upload his personal public key or Generate ssh key pairs.
-
-Administrator should confirm user environment stopping or termination by clicking Yes:
-
-<p align="center"> 
-    <img src="doc/manage_env_confirm.png" alt="Manage environment confirm" width="550">
-</p>
-
-Administrator can manage total billing quota for DLab as well as billing quota per user(s).To do this enter appropriate number in text box(es) per user(s) or/and total budget. Hit "Apply" button.
-
-### Manage roles <a name="manage_roles"></a>
-
-Administrator can choose what instance shape(s) and notebook(s) can be allowed for certain group(s) or user(s).
-To do it click on "Manage roles" button. "Manage roles" popup will show-up:
+The Administrator can choose what instance shape(s), notebook(s) and computational resources are allowed to be created for certain group(s) or user(s). The Administrator can also assign an administrator per project, who is able to manage roles within the particular project.
+To do it click on the "Add group" button. The "Add group" popup shows up:
 
 <p align="center"> 
     <img src="doc/manage_role.png" alt="Manage roles" width="780">
 </p>
 
-To add group enter group name, choose certain action which should be allowed for group and also you can add discrete user(s) (not mandatory) and then click "Create" button.
-New group will be added and appears on "Manage roles" popup.
+Roles consist of:
+- Administration - allows to execute administrative operations for the whole DLab or administrative operations only per project;
+- Billing - allows to view billing only for own resources or for all users;
+- Compute - list of Compute types which are allowed for creation;
+- Compute shapes - list of Compute shapes which are allowed for creation;
+- Notebook - list of Notebook templates which are allowed for creation;
+- Notebook shapes - list of Notebook shapes which are allowed for creation.
 
-Administrator can remove group or user. For that you should only click on "Delete group" button for certain group or click on delete icon <img src="doc/cross_icon.png" alt="delete" width="16"> for particular user. After that Hit "Yes" in confirmation popup.
+<p align="center"> 
+    <img src="doc/roles.png" alt="Roles" width="450">
+</p>
+
+To add a group enter the group name, choose certain actions which should be allowed for the group; you can also add discrete user(s) (not mandatory) and then click the "Create" button.
+After adding the group it appears on the "Manage roles" popup.
+
+The Administrator can remove a group or user. For that you should only click on the bin icon <img src="doc/bin_icon.png" alt="bin" width="15"> for a certain group or on the delete icon <img src="doc/delete_btn.png" alt="delete" width="13"> for a particular user. After that hit "Yes" in the confirmation popup.
 
 <p align="center"> 
     <img src="doc/delete_group.png" alt="Delete group" width="780">
 </p>
 
-### SSN monitor <a name="ssn_monitor"></a>
+## Project management <a name="project_management"></a>
 
-Administrator can monitor SSN HDD, Memory and CPU. 
-Clicking on "SSN monitor button" will open "SSN monitor" popup. 
-There are three tabs on  'SSN monitor' popup: CPU, HDD, Memory:
+After project creation (this step is described in [create project](#setup_edge_node)) the administrator is able to manage the project by clicking on the gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the "Actions" column for the needed project.
 
 <p align="center"> 
-    <img src="doc/cpu.png" alt="SSN CPU" width="480">
+    <img src="doc/project_view.png" alt="Project view" width="780">
 </p>
 
-<p align="center"> 
-    <img src="doc/memory.png" alt="SSN memory" width="480">
-</p>
+The following menu shows up:
 
 <p align="center"> 
-    <img src="doc/hdd.png" alt="SSN HDD" width="480">
+    <img src="doc/project_menu.png" alt="Project menu" width="150">
 </p>
 
+The Administrator can edit an already existing project:
+- Add or remove group;
+- Add new endpoint;
+- Switch off/on 'Use shared image' option.
+
+To edit the project hit "Edit project" and choose the option which you want to add, remove or change. To apply changes click on the "Update" button.
+
+To stop the Edge node hit "Stop edge node". After that confirm "OK" in the confirmation popup. All related instances change their status from "Running" to "Stopping" and soon become "Stopped". You are able to start the Edge node again after a while and proceed with your work. Do not forget to start the notebook again if you want to continue with your analytics, because starting the Edge node does not start related instances.
+
+To terminate the Edge node hit "Terminate edge node". After that confirm "OK" in the confirmation popup. All related instances change their status to "Terminating" and soon become "Terminated".
+
+## Environment management <a name="environment_management"></a>
+
+The DLab Environment Management page is an administration page allowing the administrator to see the list of all users' environments and to stop/terminate all of them.
+
+To access the Environment management page, navigate to it via the main menu:
+
+<p align="center"> 
+    <img src="doc/environment_management.png" alt="Environment management">
+</p>
+
+To stop or terminate the Notebook click on a gear icon <img src="doc/gear_icon.png" alt="gear" width="20"> in the "Actions" column for a needed Notebook and hit "Stop" or "Terminate" action:
+<p align="center"> 
+    <img src="doc/manage_env_actions.png" alt="Manage environment actions" width="160">
+</p>
+
+**NOTE:** When a Notebook is stopped, the connected Data Engine Service is terminated and the related Data Engine is stopped. When a Notebook is terminated, the related Computational resources are terminated automatically.
+
+To stop or release a specific cluster, click the appropriate button close to the cluster alias.
+
+<p align="center"> 
+    <img src="doc/managemanage_resource_actions.png" alt="Manage resource action" width="300">
+</p>
+
+Confirm stopping/decommissioning of the Computational resource by hitting "Yes":
+
+<p align="center"> 
+    <img src="doc/manage_env_confirm.png" alt="Manage environment action confirm" width="400">
+</p>
+
+**NOTE:** Terminate action is available only for notebooks and computational resources, not for Edge Nodes.
+
+### Multiple Cloud Endpoints <a name="multiple_cloud_endpoints"></a>
+
+Administrator can connect to any of the Cloud endpoints: AWS, GCP, Azure. To do that, the administrator should click on the "Endpoints" button. The "Connect endpoint" popup shows up:
+
+<p align="center"> 
+    <img src="doc/connect_endpoint.png" alt="Connect endpoint" width="520">
+</p>
+
+Once all fields are filled in and you click on the "Connect" button, you can see the list of all your added endpoints on the "Endpoint list" tab:
+
+<p align="center"> 
+    <img src="doc/endpoint_list.png" alt="Endpoint list" width="520">
+</p>
+
+Administrator can deactivate the whole analytical environment via the bin icon <img src="doc/bin_icon.png" alt="bin" width="15">. All related instances change their statuses to "Terminating" and soon become "Terminated".
+
+### Manage DLab quotas <a name="manage_dlab_quotas"></a>
+
+Administrator can set quotas per project and for the whole DLab. To do it, click on the "Manage DLab quotas" button. The "Manage DLab quotas" popup shows up, where the administrator can see all active projects:
+
+<p align="center"> 
+    <img src="doc/manage_environment.png" alt="Manage environment" width="520">
+</p>
+
+After filling in the fields and clicking on the "Apply" button, the new quotas are applied to the project and DLab.
+If the project or DLab quota is exceeded, a warning shows up during login.
+
+<p align="center" class="facebox-popup"> 
+    <img src="doc/exceeded quota.png" alt="Exceeded quota" width="400">
+</p>
+
+In such a case the user cannot create new instances, and already "Running" instances change their statuses to "Stopping" (except for the Data Engine Service, whose status changes to "Terminating") and soon become "Stopped" or "Terminated" accordingly.
+
 --------------------------------
+
 # DLab Billing report <a name="billing_page"></a>
 
 On this page you can see all billing information, including all costs associated with the service base name of SSN.
 
 ![Billing page](doc/billing_page.png)
 
-In the header you can see 3 fields:
+In the header you can see 2 fields:
 -   Service base name of your environment
--   Resource tag ID
 -   Date period of available billing report
 
 In the center of the header you can choose the report period in the datepicker:
@@ -566,49 +653,10 @@
 
 You can save the billing report in CSV format by hitting the "Export" button.
 
-You can also filter data by each column:
+You can also filter data by environment name, user, project, resource type, instance size and product.
+On top of that, you can sort data by user, project and service charges.
 
-![Billing filter](doc/billing_filter.png)
-
-**Note:** Administrator can see billing report of all users, and only he can see/filter "User" column.
-
-In the footer of billing report, you can see Total cost for all environments.
-
---------------------------------
-# DLab Environment Management Page <a name="environment_management"></a>
-
-DLab Environment Management page is an administration page allowing admins to show the list of all users` environments and to stop/terminate all of them of separate specific resource.
-
-To access Environment management page either navigate to it via main menu:
-
-<p align="center"> 
-    <img src="doc/main_menu_env.png" alt="Main menu" width="250">
-</p>
-
-<p align="center"> 
-    <img src="doc/environment_management.png" alt="Environment management">
-</p>
-
-To Stop or Terminate the Notebook click on a gear icon gear in the Actions column for a needed Notebook and hit Stop or Terminate action:
-<p align="center"> 
-    <img src="doc/manage_env_actions.png" alt="Manage environment actions" width="160">
-</p>
-
-Any Computational resources except for Spark clusters will be automatically terminated and Spark clusters will be stopped in case of Stop action hitting, and all resources will be killed in case of Terminate action hitting.
-
-To stop or release specific cluster click an appropriate button close to cluster alias.
-
-<p align="center"> 
-    <img src="doc/managemanage_resource_actions.png" alt="Manage resource action" width="300">
-</p>
-
-Confirm stopping/decommissioning of the Computational resource by hitting Yes:
-
-<p align="center"> 
-    <img src="doc/manage_env_confirm.png" alt="Manage environment action confirm" width="400">
-</p>
-
-**NOTE:** terminate action is available only for notebooks and computational resources, not for Edge Nodes.
+In the footer of the billing report, you can see the "Total" cost for all environments.
 
 --------------------------------
 
@@ -628,61 +676,3 @@
 Once your list is filtered by any of the columns, icon <img src="doc/filter_icon.png" alt="filter" width="16"> changes to <img src="doc/sort_icon.png" alt="sort" width="16"> for the filtered columns only.
 
 There is also an option for quick and easy way to filter out all inactive instances (Failed and Terminated) by clicking on “Show active” button in the ribbon. To switch back to the list of all resources, click on “Show all”.
-
-# Scheduler <a name="scheduler"></a>
-
-Scheduler component allows to automatically schedule start/stop of notebook/cluster. There are 2 types of schedulers available:
-- notebook scheduler;
-- data engine scheduler (currently spark cluster only);
-
-To create scheduler for a notebook click on a <img src="doc/gear_icon.png" alt="gear" width="20"> icon in the Actions column for a needed Notebook and hit Scheduler:
-
-<p align="center"> 
-    <img src="doc/notebook_menu_scheduler.png" alt="Notebook scheduler action" width="150">
-</p>
-After clicking you will see popup with the following fields:
-
-- start/finish dates - date range when scheduler is active;
-- start/end time - time when notebook should be running;
-- offset - your zone offset;
-- repeat on - days when scheduler should be active
-- possibility to synchronize notebook scheduler with computational schedulers
-
-<p align="center"> 
-    <img src="doc/notebook_scheduler.png" alt="Notebook scheduler" width="400">
-</p>
-
-Also scheduler can be configured for a spark cluster. To configure scheduler for spark cluster <img src="doc/icon_scheduler_computational.png" alt="scheduler_computational" width="16"> should be clicked (near computational status):
-
-<p align="center"> 
-    <img src="doc/computational_scheduler_create.png" alt="Computational scheduler create" width="400">
-</p>
-
-There is a possibility to inherit scheduler start settings from notebook, if such scheduler is present:
-
-<p align="center"> 
-    <img src="doc/computational_scheduler.png" alt="Computational scheduler" width="400">
-</p>
-
-Once any scheduler is set up, notebook/spark cluster will be started/stopped automatically.
-Please also note that if notebook is configured to be stopped, all running data engines assosiated with it will be stopped (for spark cluster) or terminated (for data engine serice) with notebook.
-
-After login user will be notified  that corresponding resources are about to be stopped/terminated in some time.
-
-<p align="center"> 
-    <img src="doc/scheduler reminder.png" alt="Scheduler reminder" width="400">
-</p>
-
-# Key reupload <a name="key_reupload"></a>
-In case when user private key was corrupted, lost etc. DLAB provide a possibility to reupload user public key.
-It can be done on manage environment page using ACTIONS menu on edge instance:
-
-<p align="center"> 
-    <img src="doc/reupload_key_action.png" alt="Reupload key action" width="200">
-</p>
-
-After that similar to create initial environment dialog appeared where you can upload new key or generate new key-pair:
- 
- <p align="center"> 
-     <img src="doc/reupload_key_dialog.png" alt="Reupload key dialog" width="400">
- </p>
diff --git a/build.properties b/build.properties
index d57adc1..d765398 100644
--- a/build.properties
+++ b/build.properties
@@ -16,4 +16,4 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-dlab.version=2.2
+dlab.version=2.3
\ No newline at end of file
diff --git a/doc/billing_filter.png b/doc/billing_filter.png
index 09a0acd..e1dbd78 100644
--- a/doc/billing_filter.png
+++ b/doc/billing_filter.png
Binary files differ
diff --git a/doc/billing_page.png b/doc/billing_page.png
index cc08102..33bd674 100644
--- a/doc/billing_page.png
+++ b/doc/billing_page.png
Binary files differ
diff --git a/doc/bin_icon.png b/doc/bin_icon.png
new file mode 100644
index 0000000..d289b5f
--- /dev/null
+++ b/doc/bin_icon.png
Binary files differ
diff --git a/doc/computational_scheduler.png b/doc/computational_scheduler.png
index b00c626..d87a22f 100644
--- a/doc/computational_scheduler.png
+++ b/doc/computational_scheduler.png
Binary files differ
diff --git a/doc/computational_scheduler_create.png b/doc/computational_scheduler_create.png
index 463351d..5d1ef24 100644
--- a/doc/computational_scheduler_create.png
+++ b/doc/computational_scheduler_create.png
Binary files differ
diff --git a/doc/connect_endpoint.png b/doc/connect_endpoint.png
new file mode 100644
index 0000000..054b3e8
--- /dev/null
+++ b/doc/connect_endpoint.png
Binary files differ
diff --git a/doc/create_notebook_from_ami.png b/doc/create_notebook_from_ami.png
index 7e4453e..11cfde0 100644
--- a/doc/create_notebook_from_ami.png
+++ b/doc/create_notebook_from_ami.png
Binary files differ
diff --git a/doc/dataproc_create.png b/doc/dataproc_create.png
new file mode 100644
index 0000000..cbab3f4
--- /dev/null
+++ b/doc/dataproc_create.png
Binary files differ
diff --git a/doc/delete_btn.png b/doc/delete_btn.png
new file mode 100644
index 0000000..6229abf
--- /dev/null
+++ b/doc/delete_btn.png
Binary files differ
diff --git a/doc/delete_group.png b/doc/delete_group.png
index d5c38e3..9b7c878 100644
--- a/doc/delete_group.png
+++ b/doc/delete_group.png
Binary files differ
diff --git a/doc/dlab_aws.png b/doc/dlab_aws.png
new file mode 100644
index 0000000..e320dfe
--- /dev/null
+++ b/doc/dlab_aws.png
Binary files differ
diff --git a/doc/dlab_azure.png b/doc/dlab_azure.png
new file mode 100644
index 0000000..ccdf3d9
--- /dev/null
+++ b/doc/dlab_azure.png
Binary files differ
diff --git a/doc/dlab_gcp.png b/doc/dlab_gcp.png
new file mode 100644
index 0000000..f16be82
--- /dev/null
+++ b/doc/dlab_gcp.png
Binary files differ
diff --git a/doc/emr_creating.png b/doc/emr_creating.png
index 7fb7fde..1e20418 100644
--- a/doc/emr_creating.png
+++ b/doc/emr_creating.png
Binary files differ
diff --git a/doc/emr_terminate_confirm.png b/doc/emr_terminate_confirm.png
index b1fa871..5eb515e 100644
--- a/doc/emr_terminate_confirm.png
+++ b/doc/emr_terminate_confirm.png
Binary files differ
diff --git a/doc/endpoint_list.png b/doc/endpoint_list.png
new file mode 100644
index 0000000..ea8586f
--- /dev/null
+++ b/doc/endpoint_list.png
Binary files differ
diff --git a/doc/environment_management.png b/doc/environment_management.png
index e4c2cda..ba0399c 100644
--- a/doc/environment_management.png
+++ b/doc/environment_management.png
Binary files differ
diff --git a/doc/git_creds_window.png b/doc/git_creds_window.png
index fdf7a41..ed41936 100644
--- a/doc/git_creds_window.png
+++ b/doc/git_creds_window.png
Binary files differ
diff --git a/doc/git_creds_window2.png b/doc/git_creds_window2.png
index 1481df0..f13444f 100644
--- a/doc/git_creds_window2.png
+++ b/doc/git_creds_window2.png
Binary files differ
diff --git a/doc/main_page.png b/doc/main_page.png
index 4338603..b6f1e17 100644
--- a/doc/main_page.png
+++ b/doc/main_page.png
Binary files differ
diff --git a/doc/main_page2.png b/doc/main_page2.png
index 5305a05..3d3af40 100644
--- a/doc/main_page2.png
+++ b/doc/main_page2.png
Binary files differ
diff --git a/doc/main_page3.png b/doc/main_page3.png
index 255de05..1812925 100644
--- a/doc/main_page3.png
+++ b/doc/main_page3.png
Binary files differ
diff --git a/doc/main_page_filter.png b/doc/main_page_filter.png
index 5818548..cd764ec 100644
--- a/doc/main_page_filter.png
+++ b/doc/main_page_filter.png
Binary files differ
diff --git a/doc/manage_env_confirm.png b/doc/manage_env_confirm.png
index 91f3d30..ae4b543 100644
--- a/doc/manage_env_confirm.png
+++ b/doc/manage_env_confirm.png
Binary files differ
diff --git a/doc/manage_environment.png b/doc/manage_environment.png
index ead01e1..73060ff 100644
--- a/doc/manage_environment.png
+++ b/doc/manage_environment.png
Binary files differ
diff --git a/doc/manage_role.png b/doc/manage_role.png
index 152cf7c..9db76c2 100644
--- a/doc/manage_role.png
+++ b/doc/manage_role.png
Binary files differ
diff --git a/doc/managemanage_resource_actions.png b/doc/managemanage_resource_actions.png
index 23c58d4..bd1394c 100644
--- a/doc/managemanage_resource_actions.png
+++ b/doc/managemanage_resource_actions.png
Binary files differ
diff --git a/doc/notebook_create.png b/doc/notebook_create.png
index 18a674b..9ca407e 100644
--- a/doc/notebook_create.png
+++ b/doc/notebook_create.png
Binary files differ
diff --git a/doc/notebook_info.png b/doc/notebook_info.png
index 4cc01a2..83e8e22 100644
--- a/doc/notebook_info.png
+++ b/doc/notebook_info.png
Binary files differ
diff --git a/doc/notebook_libs_status.png b/doc/notebook_libs_status.png
index 5f49722..8aa861d 100644
--- a/doc/notebook_libs_status.png
+++ b/doc/notebook_libs_status.png
Binary files differ
diff --git a/doc/notebook_scheduler.png b/doc/notebook_scheduler.png
index 31bd9ac..81502c3 100644
--- a/doc/notebook_scheduler.png
+++ b/doc/notebook_scheduler.png
Binary files differ
diff --git a/doc/notebook_terminated.png b/doc/notebook_terminated.png
index fb6399b..408e5ee 100644
--- a/doc/notebook_terminated.png
+++ b/doc/notebook_terminated.png
Binary files differ
diff --git a/doc/notebook_terminating.png b/doc/notebook_terminating.png
index d20b967..b62a492 100644
--- a/doc/notebook_terminating.png
+++ b/doc/notebook_terminating.png
Binary files differ
diff --git a/doc/pen_icon.png b/doc/pen_icon.png
new file mode 100644
index 0000000..c6a3a7f
--- /dev/null
+++ b/doc/pen_icon.png
Binary files differ
diff --git a/doc/project_menu.png b/doc/project_menu.png
new file mode 100644
index 0000000..c6d4976
--- /dev/null
+++ b/doc/project_menu.png
Binary files differ
diff --git a/doc/project_view.png b/doc/project_view.png
new file mode 100644
index 0000000..2415ac5
--- /dev/null
+++ b/doc/project_view.png
Binary files differ
diff --git a/doc/roles.png b/doc/roles.png
new file mode 100644
index 0000000..f7468a6
--- /dev/null
+++ b/doc/roles.png
Binary files differ
diff --git a/doc/scheduler_by_inactivity.png b/doc/scheduler_by_inactivity.png
new file mode 100644
index 0000000..decebac
--- /dev/null
+++ b/doc/scheduler_by_inactivity.png
Binary files differ
diff --git a/doc/spark_stop_confirm.png b/doc/spark_stop_confirm.png
index 59b6bf9..7b6bc34 100644
--- a/doc/spark_stop_confirm.png
+++ b/doc/spark_stop_confirm.png
Binary files differ
diff --git a/doc/upload_or_generate_user_key.png b/doc/upload_or_generate_user_key.png
index 2766334..6d6e6e1 100644
--- a/doc/upload_or_generate_user_key.png
+++ b/doc/upload_or_generate_user_key.png
Binary files differ
diff --git a/infrastructure-provisioning/scripts/POST_DEPLOYMENT.md b/infrastructure-provisioning/scripts/POST_DEPLOYMENT.md
new file mode 100644
index 0000000..aee28b7
--- /dev/null
+++ b/infrastructure-provisioning/scripts/POST_DEPLOYMENT.md
@@ -0,0 +1,44 @@
+### Prerequisites for DLab post-deployment
+
+- Service account with the following roles:
+```
+Compute Admin
+Compute Network Admin
+Dataproc Administrator
+Role Administrator
+Service Account Admin
+Service Account User
+Project IAM Admin
+Storage Admin 
+BigQuery Data Viewer
+BigQuery Job User
+```
+- Google Cloud Storage JSON API should be enabled
+- Keycloak server with a specific client for Dlab UI (could be deployed with the Keycloak deployment script)
+
+The service account should be created manually and attached to the instance on which the post-deployment script is executed.
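+
+For illustration, such an account could be created and attached with the gcloud CLI; the account name below is hypothetical and the <...> values are placeholders:
+```
+gcloud iam service-accounts create dlab-deploy --display-name "DLab deployment account"
+# Changing the attached service account requires the instance to be stopped first
+gcloud compute instances set-service-account <instance-name> --zone <zone> \
+    --service-account dlab-deploy@<project-id>.iam.gserviceaccount.com \
+    --scopes cloud-platform
+```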
+
+### Executing post-deployment script
+
+To configure the SSN node, the following steps should be executed:
+
+- Connect to the instance via SSH and run the following commands:
+```
+/usr/bin/python /opt/dlab/sources/infrastructure-provisioning/scripts/post-deployment_configuration.py
+    --keycloak_realm_name <value>
+    --keycloak_auth_server_url <value>
+    --keycloak_client_name <value>
+    --keycloak_client_secret <value>
+    --keycloak_user <value>
+    --keycloak_admin_password <value>
+```
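+
+For example, a hypothetical invocation could look like this (all values are illustrative; the <...> ones in particular must be replaced with real credentials):
+```
+/usr/bin/python /opt/dlab/sources/infrastructure-provisioning/scripts/post-deployment_configuration.py \
+    --keycloak_realm_name dlab \
+    --keycloak_auth_server_url https://<keycloak-host>/auth \
+    --keycloak_client_name dlab-ui \
+    --keycloak_client_secret <client-secret> \
+    --keycloak_user admin \
+    --keycloak_admin_password <admin-password>
+```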
+
+List of parameters for SSN node post-deployment script:
+
+| Parameter                   | Description/Value                                          |
+|-----------------------------|------------------------------------------------------------|
+| keycloak\_realm\_name       | Keycloak realm name                                        |
+| keycloak\_auth\_server\_url | URL of Keycloak auth server                                |
+| keycloak\_client\_name      | Name of client for Dlab UI                                 |
+| keycloak\_client\_secret    | Secret of client for Dlab UI                               |
+| keycloak\_user              | Keycloak user with administrator permissions               |
+| keycloak\_admin\_password   | Password for Keycloak user with administrator permissions  |
\ No newline at end of file
diff --git a/infrastructure-provisioning/scripts/deploy_dlab.py b/infrastructure-provisioning/scripts/deploy_dlab.py
index 54ca6b4..40b1485 100644
--- a/infrastructure-provisioning/scripts/deploy_dlab.py
+++ b/infrastructure-provisioning/scripts/deploy_dlab.py
@@ -36,10 +36,10 @@
 parser.add_argument('--conf_user_subnets_range', type=str, default='', help='Range of subnets which will be using for '
                                                                             'users environments. For example: '
                                                                             '10.10.0.0/24 - 10.10.10.0/24')
+parser.add_argument('--conf_private_subnet_prefix', type=str, default='24', help='Private subnet prefix')
 parser.add_argument('--conf_additional_tags', type=str, default='', help='Additional tags in format '
                                                                          '"Key1:Value1;Key2:Value2"')
 parser.add_argument('--conf_image_enabled', type=str, default='', help='Enable or Disable creating image at first time')
-parser.add_argument('--conf_shared_image_enabled', type=str, default='', help='Enable or Disable shared images')
 parser.add_argument('--aws_user_predefined_s3_policies', type=str, default='', help='Predefined policies for users '
                                                                                     'instances')
 parser.add_argument('--aws_access_key', type=str, default='', help='AWS Access Key ID')
@@ -130,12 +130,19 @@
 parser.add_argument('--keycloak_auth_server_url', type=str, default='dlab', help='Keycloak auth server URL')
 parser.add_argument('--keycloak_client_name', type=str, default='dlab', help='Keycloak client name')
 parser.add_argument('--keycloak_client_secret', type=str, default='dlab', help='Keycloak client secret')
+parser.add_argument('--keycloak_user', type=str, default='dlab', help='Keycloak user')
+parser.add_argument('--keycloak_user_password', type=str, default='keycloak-user-password', help='Keycloak user password')
 parser.add_argument('--tags', type=str, default='line_item_operation,line_item_line_item_description', help='Column name in report file that '
                                                                                   'contains tags')
 parser.add_argument('--billing_dataset_name', type=str, default='', help='Name of GCP dataset (BigQuery service)'
                                                                          ' for billing')
 parser.add_argument('--default_endpoint_name', type=str, default='local', help='Name of localhost provisioning service,'
                                                                                'that created by default')
+parser.add_argument('--conf_stepcerts_enabled', type=str, default='false', help='Enable or disable step certificates')
+parser.add_argument('--conf_stepcerts_root_ca', type=str, default='', help='Step root CA')
+parser.add_argument('--conf_stepcerts_kid', type=str, default='', help='Step KID')
+parser.add_argument('--conf_stepcerts_kid_password', type=str, default='', help='Step KID password')
+parser.add_argument('--conf_stepcerts_ca_url', type=str, default='', help='Step CA URL')
 parser.add_argument('--action', required=True, type=str, default='', choices=['build', 'deploy', 'create', 'terminate'],
                     help='Available options: build, deploy, create, terminate')
 args = parser.parse_args()
diff --git a/infrastructure-provisioning/scripts/deploy_keycloak/deploy_keycloak.py b/infrastructure-provisioning/scripts/deploy_keycloak/deploy_keycloak.py
new file mode 100644
index 0000000..dce8a86
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_keycloak/deploy_keycloak.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+from fabric.api import *
+import argparse
+import sys
+import os
+from fabric.contrib.files import exists
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--os_user', type=str, default='')
+parser.add_argument('--public_ip_address', type=str, default='')
+parser.add_argument('--keyfile', type=str, default='')
+parser.add_argument('--keycloak_realm_name', type=str, default='')
+parser.add_argument('--keycloak_user', type=str, default='')
+parser.add_argument('--keycloak_user_password', type=str, default='')
+args = parser.parse_args()
+
+keycloak_version = "8.0.1"
+templates_dir = './infrastructure-provisioning/scripts/deploy_keycloak/templates/'
+external_port = "80"
+internal_port = "8080"
+private_ip_address = "127.0.0.1"
+
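+# Install default JRE/JDK once per user; the marker file under ~/.ensure_dir prevents re-installation on repeated runs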
+def ensure_jre_jdk(os_user):
+    if not exists('/home/' + os_user + '/.ensure_dir/jre_jdk_ensured'):
+        try:
+            sudo('mkdir -p /home/' + os_user + '/.ensure_dir')
+            sudo('apt-get update')
+            sudo('apt-get install -y default-jre')
+            sudo('apt-get install -y default-jdk')
+            sudo('touch /home/' + os_user + '/.ensure_dir/jre_jdk_ensured')
+        except:
+            sys.exit(1)
+
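+# Download the Keycloak distribution, create an initial admin user in the master realm,
+# import the realm template and register Keycloak as a systemd service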
+def configure_keycloak():
+    sudo('wget https://downloads.jboss.org/keycloak/' + keycloak_version + '/keycloak-' + keycloak_version + '.tar.gz -O /tmp/keycloak-' + keycloak_version + '.tar.gz')
+    sudo('tar -zxvf /tmp/keycloak-' + keycloak_version + '.tar.gz -C /opt/')
+    sudo('ln -s /opt/keycloak-' + keycloak_version + ' /opt/keycloak')
+    sudo('chown ' + args.os_user + ':' + args.os_user + ' -R /opt/keycloak-' + keycloak_version)
+    sudo('/opt/keycloak/bin/add-user-keycloak.sh -r master -u ' + args.keycloak_user + ' -p ' + args.keycloak_user_password) #create initial admin user in master realm
+    put(templates_dir + 'realm.json', '/tmp/' + args.keycloak_realm_name + '-realm.json')
+    put(templates_dir + 'keycloak.service', '/tmp/keycloak.service')
+    sudo("cp /tmp/keycloak.service /etc/systemd/system/keycloak.service")
+    sudo("sed -i 's|realm-name|" + args.keycloak_realm_name + "|' /tmp/" + args.keycloak_realm_name + "-realm.json")
+    sudo("sed -i 's|OS_USER|" + args.os_user + "|' /etc/systemd/system/keycloak.service")
+    sudo("sed -i 's|private_ip_address|" + private_ip_address + "|' /etc/systemd/system/keycloak.service")
+    sudo("sed -i 's|keycloak_realm_name|" + args.keycloak_realm_name + "|' /etc/systemd/system/keycloak.service")
+    sudo("systemctl daemon-reload")
+    sudo("systemctl enable keycloak")
+    sudo("systemctl start keycloak")
+
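+# Install nginx and configure it as a reverse proxy from external_port (80) to the Keycloak server on internal_port (8080)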
+def configure_nginx():
+    sudo('apt install -y nginx')
+    put(templates_dir + 'nginx.conf', '/tmp/nginx.conf')
+    sudo("cp /tmp/nginx.conf /etc/nginx/conf.d/nginx.conf")
+    sudo("sed -i 's|80|81|' /etc/nginx/sites-enabled/default")
+    sudo("sed -i 's|external_port|" + external_port + "|' /etc/nginx/conf.d/nginx.conf")
+    sudo("sed -i 's|internal_port|" + internal_port + "|' /etc/nginx/conf.d/nginx.conf")
+    sudo("sed -i 's|private_ip_address|" + private_ip_address + "|' /etc/nginx/conf.d/nginx.conf")
+    sudo("systemctl daemon-reload")
+    sudo("systemctl enable nginx")
+    sudo("systemctl restart nginx")
+
+if __name__ == "__main__":
+    local("sudo mkdir /logs/keycloak -p")
+    local('sudo chown ' + args.os_user + ':' + args.os_user + ' -R /logs/keycloak')
+    local_log_filename = "keycloak_deployment_script.log"
+    local_log_filepath = "/logs/keycloak/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    print("Configure connections")
+    if args.public_ip_address != '':
+        try:
+            env['connection_attempts'] = 100
+            env.key_filename = [args.keyfile]
+            env.host_string = '{}@{}'.format(args.os_user, args.public_ip_address)
+        except Exception as err:
+            print("Failed establish connection. Excpeption: " + str(err))
+            sys.exit(1)
+    else:
+        try:
+            env['connection_attempts'] = 100
+            env.key_filename = [args.keyfile]
+            env.host_string = '{}@{}'.format(args.os_user, private_ip_address)
+        except Exception as err:
+            print("Failed establish connection. Excpeption: " + str(err))
+            sys.exit(1)
+
+    print("Install Java")
+    ensure_jre_jdk(args.os_user)
+
+    try:
+        print("Installing Keycloak")
+        configure_keycloak()
+    except Exception as err:
+        print("Failed to install Keycloak: " + str(err))
+        sys.exit(1)
+
+    try:
+        print("Installing nginx")
+        configure_nginx()
+    except Exception as err:
+        print("Failed to install nginx: " + str(err))
+        sys.exit(1)
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/scripts/deploy_keycloak/templates/keycloak.service
similarity index 64%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/scripts/deploy_keycloak/templates/keycloak.service
index 951fdd7..ebb2070 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/scripts/deploy_keycloak/templates/keycloak.service
@@ -19,22 +19,19 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
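+# Placeholders OS_USER, private_ip_address and keycloak_realm_name below are substituted by deploy_keycloak.py via sed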
+[Unit]
+Description=The Keycloak Application Server
+After=syslog.target network.target
+Before=httpd.service
 
+[Service]
+Type=simple
+PIDFile=/var/run/keycloak/keycloak-%H.pid
+ExecStart=/opt/keycloak/bin/standalone.sh -Dkeycloak.migration.action=import -Dkeycloak.migration.provider=singleFile -Dkeycloak.migration.file=/tmp/keycloak_realm_name-realm.json -Dkeycloak.migration.strategy=IGNORE_EXISTING -b private_ip_address
+User=OS_USER
+Group=OS_USER
+WorkingDirectory=/opt/keycloak
 
-USER root
+[Install]
+WantedBy=multi-user.target
 
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/scripts/deploy_keycloak/templates/nginx.conf
similarity index 71%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/scripts/deploy_keycloak/templates/nginx.conf
index 951fdd7..32ffa5e 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/scripts/deploy_keycloak/templates/nginx.conf
@@ -19,22 +19,17 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
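+# Placeholders external_port, internal_port and private_ip_address below are substituted by deploy_keycloak.py via sed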
+server {
+   listen external_port;
 
+   location / {
+       proxy_pass http://private_ip_address:internal_port;
+       proxy_set_header Host $host;
+       proxy_http_version 1.1;
+       proxy_set_header X-Real-IP         $remote_addr;
+       proxy_set_header X-Forwarded-For   $proxy_add_x_forwarded_for;
+       proxy_set_header X-Forwarded-Proto $scheme;
 
-USER root
+   }
+}
 
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
diff --git a/infrastructure-provisioning/scripts/deploy_keycloak/templates/realm.json b/infrastructure-provisioning/scripts/deploy_keycloak/templates/realm.json
new file mode 100644
index 0000000..bf7e94b
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_keycloak/templates/realm.json
@@ -0,0 +1,1207 @@
+{
+  "id": "realm-name",
+  "realm": "realm-name",
+  "notBefore": 0,
+  "revokeRefreshToken": false,
+  "refreshTokenMaxReuse": 0,
+  "accessTokenLifespan": 300,
+  "accessTokenLifespanForImplicitFlow": 900,
+  "ssoSessionIdleTimeout": 1800,
+  "ssoSessionMaxLifespan": 36000,
+  "ssoSessionIdleTimeoutRememberMe": 0,
+  "ssoSessionMaxLifespanRememberMe": 0,
+  "offlineSessionIdleTimeout": 2592000,
+  "offlineSessionMaxLifespanEnabled": false,
+  "offlineSessionMaxLifespan": 5184000,
+  "accessCodeLifespan": 60,
+  "accessCodeLifespanUserAction": 300,
+  "accessCodeLifespanLogin": 1800,
+  "actionTokenGeneratedByAdminLifespan": 43200,
+  "actionTokenGeneratedByUserLifespan": 300,
+  "enabled": true,
+  "sslRequired": "external",
+  "registrationAllowed": false,
+  "registrationEmailAsUsername": false,
+  "rememberMe": false,
+  "verifyEmail": false,
+  "loginWithEmailAllowed": true,
+  "duplicateEmailsAllowed": false,
+  "resetPasswordAllowed": false,
+  "editUsernameAllowed": false,
+  "bruteForceProtected": false,
+  "permanentLockout": false,
+  "maxFailureWaitSeconds": 900,
+  "minimumQuickLoginWaitSeconds": 60,
+  "waitIncrementSeconds": 60,
+  "quickLoginCheckMilliSeconds": 1000,
+  "maxDeltaTimeSeconds": 43200,
+  "failureFactor": 30,
+  "defaultRoles": [
+    "offline_access",
+    "uma_authorization"
+  ],
+  "requiredCredentials": [
+    "password"
+  ],
+  "otpPolicyType": "totp",
+  "otpPolicyAlgorithm": "HmacSHA1",
+  "otpPolicyInitialCounter": 0,
+  "otpPolicyDigits": 6,
+  "otpPolicyLookAheadWindow": 1,
+  "otpPolicyPeriod": 30,
+  "otpSupportedApplications": [
+    "FreeOTP",
+    "Google Authenticator"
+  ],
+  "scopeMappings": [
+    {
+      "clientScope": "offline_access",
+      "roles": [
+        "offline_access"
+      ]
+    }
+  ],
+  "clientScopes": [
+    {
+      "id": "9a25299e-8dd8-42ff-8975-bb8e2bad1dcb",
+      "name": "address",
+      "description": "OpenID Connect built-in scope: address",
+      "protocol": "openid-connect",
+      "attributes": {
+        "include.in.token.scope": "true",
+        "display.on.consent.screen": "true",
+        "consent.screen.text": "${addressScopeConsentText}"
+      },
+      "protocolMappers": [
+        {
+          "id": "fc2beb71-08f4-49be-8ce9-8794e21af889",
+          "name": "address",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-address-mapper",
+          "consentRequired": false,
+          "config": {
+            "user.attribute.formatted": "formatted",
+            "user.attribute.country": "country",
+            "user.attribute.postal_code": "postal_code",
+            "userinfo.token.claim": "true",
+            "user.attribute.street": "street",
+            "id.token.claim": "true",
+            "user.attribute.region": "region",
+            "access.token.claim": "true",
+            "user.attribute.locality": "locality"
+          }
+        }
+      ]
+    },
+    {
+      "id": "5def1b5f-9ad5-4042-b5cd-850338dd1f4b",
+      "name": "email",
+      "description": "OpenID Connect built-in scope: email",
+      "protocol": "openid-connect",
+      "attributes": {
+        "include.in.token.scope": "true",
+        "display.on.consent.screen": "true",
+        "consent.screen.text": "${emailScopeConsentText}"
+      },
+      "protocolMappers": [
+        {
+          "id": "96002715-a099-45bd-9766-f55eca916d49",
+          "name": "email verified",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-property-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "emailVerified",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "email_verified",
+            "jsonType.label": "boolean"
+          }
+        },
+        {
+          "id": "0baccf41-5c43-45c6-bce3-99ac0674b949",
+          "name": "email",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-property-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "email",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "email",
+            "jsonType.label": "String"
+          }
+        }
+      ]
+    },
+    {
+      "id": "44a62200-8cd3-4256-9360-50849d846c58",
+      "name": "microprofile-jwt",
+      "description": "Microprofile - JWT built-in scope",
+      "protocol": "openid-connect",
+      "attributes": {
+        "include.in.token.scope": "true",
+        "display.on.consent.screen": "false"
+      },
+      "protocolMappers": [
+        {
+          "id": "85715508-25c2-49c3-a5f4-8c02503bf210",
+          "name": "groups",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-realm-role-mapper",
+          "consentRequired": false,
+          "config": {
+            "multivalued": "true",
+            "user.attribute": "foo",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "groups",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "dad65d51-2a08-4b1e-adca-8f41f8ebf641",
+          "name": "upn",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-property-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "username",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "upn",
+            "jsonType.label": "String"
+          }
+        }
+      ]
+    },
+    {
+      "id": "f442cf48-f959-4826-9014-a536372ea010",
+      "name": "offline_access",
+      "description": "OpenID Connect built-in scope: offline_access",
+      "protocol": "openid-connect",
+      "attributes": {
+        "consent.screen.text": "${offlineAccessScopeConsentText}",
+        "display.on.consent.screen": "true"
+      }
+    },
+    {
+      "id": "1350646e-5728-4c4f-a77e-3199a3b22965",
+      "name": "phone",
+      "description": "OpenID Connect built-in scope: phone",
+      "protocol": "openid-connect",
+      "attributes": {
+        "include.in.token.scope": "true",
+        "display.on.consent.screen": "true",
+        "consent.screen.text": "${phoneScopeConsentText}"
+      },
+      "protocolMappers": [
+        {
+          "id": "5c376cd3-9863-4dcd-988f-c332e4ad3f87",
+          "name": "phone number",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "phoneNumber",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "phone_number",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "639eecd7-c70b-4155-9375-69d72078746e",
+          "name": "phone number verified",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "phoneNumberVerified",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "phone_number_verified",
+            "jsonType.label": "boolean"
+          }
+        }
+      ]
+    },
+    {
+      "id": "4ad78082-7bca-438a-a570-5104721cdb55",
+      "name": "profile",
+      "description": "OpenID Connect built-in scope: profile",
+      "protocol": "openid-connect",
+      "attributes": {
+        "include.in.token.scope": "true",
+        "display.on.consent.screen": "true",
+        "consent.screen.text": "${profileScopeConsentText}"
+      },
+      "protocolMappers": [
+        {
+          "id": "a7459ae0-8d15-4186-84df-765faf9a1381",
+          "name": "full name",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-full-name-mapper",
+          "consentRequired": false,
+          "config": {
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "userinfo.token.claim": "true"
+          }
+        },
+        {
+          "id": "7579f228-3f20-4712-b5ac-2aa260f85c23",
+          "name": "picture",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "picture",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "picture",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "e48681f5-3d5a-4872-9b73-fa414fa84f52",
+          "name": "profile",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "profile",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "profile",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "bec64208-fda8-43cf-a00e-bed90a4a720d",
+          "name": "middle name",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "middleName",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "middle_name",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "fc08bdb4-b2d0-4eff-b13a-ee5ffeb724ab",
+          "name": "nickname",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "nickname",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "nickname",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "1d6e5660-e13c-4c50-a866-3b88effcaf9b",
+          "name": "family name",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-property-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "lastName",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "family_name",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "692c3355-46bb-416f-ba65-958974532177",
+          "name": "locale",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "locale",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "locale",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "34620604-03d4-4c2d-bf59-cd6eb5fc3ec2",
+          "name": "website",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "website",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "website",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "9cc4af23-3f74-42ad-9215-82425c18d4b1",
+          "name": "gender",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "gender",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "gender",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "ef87dccd-13c6-4121-a600-3ba62fb1a375",
+          "name": "birthdate",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "birthdate",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "birthdate",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "f0dc60f7-4c39-4dfc-91ae-53136b754449",
+          "name": "zoneinfo",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "zoneinfo",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "zoneinfo",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "cfdb9a06-e45b-4dac-8f14-9e3c56ab952f",
+          "name": "given name",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-property-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "firstName",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "given_name",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "593b9e7f-16be-46af-b45d-974b46be6cea",
+          "name": "username",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-property-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "username",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "preferred_username",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "758d08a5-a3b9-40d3-8d05-e8d885fb88ed",
+          "name": "updated at",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-attribute-mapper",
+          "consentRequired": false,
+          "config": {
+            "userinfo.token.claim": "true",
+            "user.attribute": "updatedAt",
+            "id.token.claim": "true",
+            "access.token.claim": "true",
+            "claim.name": "updated_at",
+            "jsonType.label": "String"
+          }
+        }
+      ]
+    },
+    {
+      "id": "e7382dbf-7a20-46ed-be59-2d08c1d2169a",
+      "name": "role_list",
+      "description": "SAML role list",
+      "protocol": "saml",
+      "attributes": {
+        "consent.screen.text": "${samlRoleListScopeConsentText}",
+        "display.on.consent.screen": "true"
+      },
+      "protocolMappers": [
+        {
+          "id": "0e834165-d077-4394-8f28-23d2962b7a64",
+          "name": "role list",
+          "protocol": "saml",
+          "protocolMapper": "saml-role-list-mapper",
+          "consentRequired": false,
+          "config": {
+            "single": "false",
+            "attribute.nameformat": "Basic",
+            "attribute.name": "Role"
+          }
+        }
+      ]
+    },
+    {
+      "id": "aa2d0a3a-6365-43f6-a285-89743f9eebd7",
+      "name": "roles",
+      "description": "OpenID Connect scope for add user roles to the access token",
+      "protocol": "openid-connect",
+      "attributes": {
+        "include.in.token.scope": "false",
+        "display.on.consent.screen": "true",
+        "consent.screen.text": "${rolesScopeConsentText}"
+      },
+      "protocolMappers": [
+        {
+          "id": "ce6c098c-36bc-44f5-9533-656e9422a1c4",
+          "name": "client roles",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-client-role-mapper",
+          "consentRequired": false,
+          "config": {
+            "multivalued": "true",
+            "user.attribute": "foo",
+            "access.token.claim": "true",
+            "claim.name": "resource_access.${client_id}.roles",
+            "jsonType.label": "String"
+          }
+        },
+        {
+          "id": "6eb7ec9f-7c0e-4cdf-9e36-c738c9581a66",
+          "name": "audience resolve",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-audience-resolve-mapper",
+          "consentRequired": false,
+          "config": {}
+        },
+        {
+          "id": "1370e66c-bb21-4f0b-801b-18728d66e0e1",
+          "name": "realm roles",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-usermodel-realm-role-mapper",
+          "consentRequired": false,
+          "config": {
+            "multivalued": "true",
+            "user.attribute": "foo",
+            "access.token.claim": "true",
+            "claim.name": "realm_access.roles",
+            "jsonType.label": "String"
+          }
+        }
+      ]
+    },
+    {
+      "id": "fb58f333-7fcc-4f31-abfa-5b9198e90de7",
+      "name": "web-origins",
+      "description": "OpenID Connect scope for add allowed web origins to the access token",
+      "protocol": "openid-connect",
+      "attributes": {
+        "include.in.token.scope": "false",
+        "display.on.consent.screen": "false",
+        "consent.screen.text": ""
+      },
+      "protocolMappers": [
+        {
+          "id": "8e62443b-815c-4df7-b1cb-e10ae0c7e6cd",
+          "name": "allowed web origins",
+          "protocol": "openid-connect",
+          "protocolMapper": "oidc-allowed-origins-mapper",
+          "consentRequired": false,
+          "config": {}
+        }
+      ]
+    }
+  ],
+  "defaultDefaultClientScopes": [
+    "role_list",
+    "profile",
+    "email",
+    "roles",
+    "web-origins"
+  ],
+  "defaultOptionalClientScopes": [
+    "offline_access",
+    "address",
+    "phone",
+    "microprofile-jwt"
+  ],
+  "browserSecurityHeaders": {
+    "contentSecurityPolicyReportOnly": "",
+    "xContentTypeOptions": "nosniff",
+    "xRobotsTag": "none",
+    "xFrameOptions": "SAMEORIGIN",
+    "xXSSProtection": "1; mode=block",
+    "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';",
+    "strictTransportSecurity": "max-age=31536000; includeSubDomains"
+  },
+  "smtpServer": {},
+  "eventsEnabled": false,
+  "eventsListeners": [
+    "jboss-logging"
+  ],
+  "enabledEventTypes": [],
+  "adminEventsEnabled": false,
+  "adminEventsDetailsEnabled": false,
+  "components": {
+    "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [
+      {
+        "id": "56ed818b-3279-4afd-80d9-d738d441dacd",
+        "name": "Full Scope Disabled",
+        "providerId": "scope",
+        "subType": "anonymous",
+        "subComponents": {},
+        "config": {}
+      },
+      {
+        "id": "c2464ee0-1080-4730-88ac-71039312de19",
+        "name": "Allowed Protocol Mapper Types",
+        "providerId": "allowed-protocol-mappers",
+        "subType": "authenticated",
+        "subComponents": {},
+        "config": {
+          "allowed-protocol-mapper-types": [
+            "oidc-full-name-mapper",
+            "saml-user-attribute-mapper",
+            "oidc-address-mapper",
+            "saml-role-list-mapper",
+            "oidc-usermodel-attribute-mapper",
+            "saml-user-property-mapper",
+            "oidc-usermodel-property-mapper",
+            "oidc-sha256-pairwise-sub-mapper"
+          ]
+        }
+      },
+      {
+        "id": "a7c49446-9ecf-4189-a672-72f7d24efc17",
+        "name": "Allowed Client Scopes",
+        "providerId": "allowed-client-templates",
+        "subType": "anonymous",
+        "subComponents": {},
+        "config": {
+          "allow-default-scopes": [
+            "true"
+          ]
+        }
+      },
+      {
+        "id": "284c81c9-080d-4972-a05c-04d6e6fef800",
+        "name": "Allowed Client Scopes",
+        "providerId": "allowed-client-templates",
+        "subType": "authenticated",
+        "subComponents": {},
+        "config": {
+          "allow-default-scopes": [
+            "true"
+          ]
+        }
+      },
+      {
+        "id": "c9dc4351-2b9a-41b1-80b9-5e50e3f64988",
+        "name": "Allowed Protocol Mapper Types",
+        "providerId": "allowed-protocol-mappers",
+        "subType": "anonymous",
+        "subComponents": {},
+        "config": {
+          "allowed-protocol-mapper-types": [
+            "saml-user-attribute-mapper",
+            "saml-role-list-mapper",
+            "oidc-address-mapper",
+            "oidc-usermodel-property-mapper",
+            "oidc-usermodel-attribute-mapper",
+            "saml-user-property-mapper",
+            "oidc-full-name-mapper",
+            "oidc-sha256-pairwise-sub-mapper"
+          ]
+        }
+      },
+      {
+        "id": "04878c61-1a9c-42e7-8006-10b6f8f985d0",
+        "name": "Trusted Hosts",
+        "providerId": "trusted-hosts",
+        "subType": "anonymous",
+        "subComponents": {},
+        "config": {
+          "host-sending-registration-request-must-match": [
+            "true"
+          ],
+          "client-uris-must-match": [
+            "true"
+          ]
+        }
+      },
+      {
+        "id": "382402ab-b141-4702-8a43-dd62f90020d2",
+        "name": "Max Clients Limit",
+        "providerId": "max-clients",
+        "subType": "anonymous",
+        "subComponents": {},
+        "config": {
+          "max-clients": [
+            "200"
+          ]
+        }
+      },
+      {
+        "id": "8c965e63-599e-41b9-b7db-bbd212ba4204",
+        "name": "Consent Required",
+        "providerId": "consent-required",
+        "subType": "anonymous",
+        "subComponents": {},
+        "config": {}
+      }
+    ],
+    "org.keycloak.keys.KeyProvider": [
+      {
+        "id": "ae5467c6-ecf3-48e2-872b-8d671d581e96",
+        "name": "aes-generated",
+        "providerId": "aes-generated",
+        "subComponents": {},
+        "config": {
+          "priority": [
+            "100"
+          ]
+        }
+      },
+      {
+        "id": "2b79fd63-aaba-4524-be66-f6eddc17edb2",
+        "name": "rsa-generated",
+        "providerId": "rsa-generated",
+        "subComponents": {},
+        "config": {
+          "priority": [
+            "100"
+          ]
+        }
+      },
+      {
+        "id": "97567b5e-61bb-4929-bbef-29aa2cde4cf9",
+        "name": "hmac-generated",
+        "providerId": "hmac-generated",
+        "subComponents": {},
+        "config": {
+          "priority": [
+            "100"
+          ],
+          "algorithm": [
+            "HS256"
+          ]
+        }
+      }
+    ]
+  },
+  "internationalizationEnabled": false,
+  "supportedLocales": [],
+  "authenticationFlows": [
+    {
+      "id": "112556c1-b4d3-4512-9b1c-3f67a2fb2f9d",
+      "alias": "Handle Existing Account",
+      "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider",
+      "providerId": "basic-flow",
+      "topLevel": false,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "idp-confirm-link",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "idp-email-verification",
+          "requirement": "ALTERNATIVE",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "requirement": "ALTERNATIVE",
+          "priority": 30,
+          "flowAlias": "Verify Existing Account by Re-authentication",
+          "userSetupAllowed": false,
+          "autheticatorFlow": true
+        }
+      ]
+    },
+    {
+      "id": "3dc52ebd-872c-407a-9c51-c2e8bbbb765d",
+      "alias": "Verify Existing Account by Re-authentication",
+      "description": "Reauthentication of existing account",
+      "providerId": "basic-flow",
+      "topLevel": false,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "idp-username-password-form",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "auth-otp-form",
+          "requirement": "OPTIONAL",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "24204e7e-7acd-4e14-8ba2-66fe3bbf6d42",
+      "alias": "browser",
+      "description": "browser based authentication",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "auth-cookie",
+          "requirement": "ALTERNATIVE",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "auth-spnego",
+          "requirement": "DISABLED",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "identity-provider-redirector",
+          "requirement": "ALTERNATIVE",
+          "priority": 25,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "requirement": "ALTERNATIVE",
+          "priority": 30,
+          "flowAlias": "forms",
+          "userSetupAllowed": false,
+          "autheticatorFlow": true
+        }
+      ]
+    },
+    {
+      "id": "4859ae98-00f2-4361-b8dc-df74d8d81344",
+      "alias": "clients",
+      "description": "Base authentication for clients",
+      "providerId": "client-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "client-secret",
+          "requirement": "ALTERNATIVE",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "client-jwt",
+          "requirement": "ALTERNATIVE",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "client-secret-jwt",
+          "requirement": "ALTERNATIVE",
+          "priority": 30,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "client-x509",
+          "requirement": "ALTERNATIVE",
+          "priority": 40,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "3ab2060a-9242-477a-9c4a-2199f041f9a8",
+      "alias": "direct grant",
+      "description": "OpenID Connect Resource Owner Grant",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "direct-grant-validate-username",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "direct-grant-validate-password",
+          "requirement": "REQUIRED",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "direct-grant-validate-otp",
+          "requirement": "OPTIONAL",
+          "priority": 30,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "182e317e-7cf1-4282-8e3a-f36227769a2c",
+      "alias": "docker auth",
+      "description": "Used by Docker clients to authenticate against the IDP",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "docker-http-basic-authenticator",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "abc99935-1d39-4885-84d6-b31a9ac434d1",
+      "alias": "first broker login",
+      "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticatorConfig": "review profile config",
+          "authenticator": "idp-review-profile",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticatorConfig": "create unique user config",
+          "authenticator": "idp-create-user-if-unique",
+          "requirement": "ALTERNATIVE",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "requirement": "ALTERNATIVE",
+          "priority": 30,
+          "flowAlias": "Handle Existing Account",
+          "userSetupAllowed": false,
+          "autheticatorFlow": true
+        }
+      ]
+    },
+    {
+      "id": "089a2e49-9a5c-430a-9257-f53cf2524d73",
+      "alias": "forms",
+      "description": "Username, password, otp and other auth forms.",
+      "providerId": "basic-flow",
+      "topLevel": false,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "auth-username-password-form",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "auth-otp-form",
+          "requirement": "OPTIONAL",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "cef902eb-e486-4ba2-b0a3-4fe5464f4f3f",
+      "alias": "http challenge",
+      "description": "An authentication flow based on challenge-response HTTP Authentication Schemes",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "no-cookie-redirect",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "basic-auth",
+          "requirement": "REQUIRED",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "basic-auth-otp",
+          "requirement": "DISABLED",
+          "priority": 30,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "auth-spnego",
+          "requirement": "DISABLED",
+          "priority": 40,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "1e9f2a42-0075-4af7-a4b8-9ca47f458956",
+      "alias": "registration",
+      "description": "registration flow",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "registration-page-form",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "flowAlias": "registration form",
+          "userSetupAllowed": false,
+          "autheticatorFlow": true
+        }
+      ]
+    },
+    {
+      "id": "41882c85-49ed-4e97-9f79-6165fcf89de2",
+      "alias": "registration form",
+      "description": "registration form",
+      "providerId": "form-flow",
+      "topLevel": false,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "registration-user-creation",
+          "requirement": "REQUIRED",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "registration-profile-action",
+          "requirement": "REQUIRED",
+          "priority": 40,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "registration-password-action",
+          "requirement": "REQUIRED",
+          "priority": 50,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "registration-recaptcha-action",
+          "requirement": "DISABLED",
+          "priority": 60,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "bf967903-2352-4d45-a3dd-adae95fe54d0",
+      "alias": "reset credentials",
+      "description": "Reset credentials for a user if they forgot their password or something",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "reset-credentials-choose-user",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "reset-credential-email",
+          "requirement": "REQUIRED",
+          "priority": 20,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "reset-password",
+          "requirement": "REQUIRED",
+          "priority": 30,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        },
+        {
+          "authenticator": "reset-otp",
+          "requirement": "OPTIONAL",
+          "priority": 40,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    },
+    {
+      "id": "c3862bf5-e37c-44fb-bca2-1f7ccce89276",
+      "alias": "saml ecp",
+      "description": "SAML ECP Profile Authentication Flow",
+      "providerId": "basic-flow",
+      "topLevel": true,
+      "builtIn": true,
+      "authenticationExecutions": [
+        {
+          "authenticator": "http-basic-authenticator",
+          "requirement": "REQUIRED",
+          "priority": 10,
+          "userSetupAllowed": false,
+          "autheticatorFlow": false
+        }
+      ]
+    }
+  ],
+  "authenticatorConfig": [
+    {
+      "id": "f7651125-5e61-4326-9c4b-393a45259474",
+      "alias": "create unique user config",
+      "config": {
+        "require.password.update.after.registration": "false"
+      }
+    },
+    {
+      "id": "cf047c43-f09b-4e4e-b2bd-26ec5b904bd6",
+      "alias": "review profile config",
+      "config": {
+        "update.profile.on.first.login": "missing"
+      }
+    }
+  ],
+  "requiredActions": [
+    {
+      "alias": "CONFIGURE_TOTP",
+      "name": "Configure OTP",
+      "providerId": "CONFIGURE_TOTP",
+      "enabled": true,
+      "defaultAction": false,
+      "priority": 10,
+      "config": {}
+    },
+    {
+      "alias": "terms_and_conditions",
+      "name": "Terms and Conditions",
+      "providerId": "terms_and_conditions",
+      "enabled": false,
+      "defaultAction": false,
+      "priority": 20,
+      "config": {}
+    },
+    {
+      "alias": "UPDATE_PASSWORD",
+      "name": "Update Password",
+      "providerId": "UPDATE_PASSWORD",
+      "enabled": true,
+      "defaultAction": false,
+      "priority": 30,
+      "config": {}
+    },
+    {
+      "alias": "UPDATE_PROFILE",
+      "name": "Update Profile",
+      "providerId": "UPDATE_PROFILE",
+      "enabled": true,
+      "defaultAction": false,
+      "priority": 40,
+      "config": {}
+    },
+    {
+      "alias": "VERIFY_EMAIL",
+      "name": "Verify Email",
+      "providerId": "VERIFY_EMAIL",
+      "enabled": true,
+      "defaultAction": false,
+      "priority": 50,
+      "config": {}
+    }
+  ],
+  "browserFlow": "browser",
+  "registrationFlow": "registration",
+  "directGrantFlow": "direct grant",
+  "resetCredentialsFlow": "reset credentials",
+  "clientAuthenticationFlow": "clients",
+  "dockerAuthenticationFlow": "docker auth",
+  "attributes": {
+    "_browser_header.xXSSProtection": "1; mode=block",
+    "_browser_header.xFrameOptions": "SAMEORIGIN",
+    "_browser_header.strictTransportSecurity": "max-age=31536000; includeSubDomains",
+    "permanentLockout": "false",
+    "quickLoginCheckMilliSeconds": "1000",
+    "_browser_header.xRobotsTag": "none",
+    "maxFailureWaitSeconds": "900",
+    "minimumQuickLoginWaitSeconds": "60",
+    "failureFactor": "30",
+    "actionTokenGeneratedByUserLifespan": "300",
+    "maxDeltaTimeSeconds": "43200",
+    "_browser_header.xContentTypeOptions": "nosniff",
+    "offlineSessionMaxLifespan": "5184000",
+    "actionTokenGeneratedByAdminLifespan": "43200",
+    "_browser_header.contentSecurityPolicyReportOnly": "",
+    "bruteForceProtected": "false",
+    "_browser_header.contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';",
+    "waitIncrementSeconds": "60",
+    "offlineSessionMaxLifespanEnabled": "false"
+  },
+  "keycloakVersion": "6.0.1",
+  "userManagedAccessAllowed": false
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
index 57f8335..3864229 100644
--- a/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
+++ b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
@@ -1173,21 +1173,21 @@
                     configuration['notebook_spark_version'], configuration['notebook_hadoop_version']),
                 'https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/{0}/hadoop-aws-{0}.jar'.format('2.7.4'),
                 'https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk/{0}/aws-java-sdk-{0}.jar'.format('1.7.4'),
-                #'https://maven.twttr.com/com/hadoop/gplcompression/hadoop-lzo/{0}/hadoop-lzo-{0}.jar'.format('0.4.20'),
-                'http://central.maven.org/maven2/org/scalanlp/breeze_{0}/{1}/breeze_{0}-{1}.jar'.format('2.11', '0.12'),
-                'http://central.maven.org/maven2/org/scalanlp/breeze-natives_{0}/{1}/breeze-natives_{0}-{1}.jar'.format(
+                # 'https://maven.twttr.com/com/hadoop/gplcompression/hadoop-lzo/{0}/hadoop-lzo-{0}.jar'.format('0.4.20'),
+                'https://repo1.maven.org/maven2/org/scalanlp/breeze_{0}/{1}/breeze_{0}-{1}.jar'.format('2.11', '0.12'),
+                'https://repo1.maven.org/maven2/org/scalanlp/breeze-natives_{0}/{1}/breeze-natives_{0}-{1}.jar'.format(
                     '2.11', '0.12'),
-                'http://central.maven.org/maven2/org/scalanlp/breeze-viz_{0}/{1}/breeze-viz_{0}-{1}.jar'.format(
+                'https://repo1.maven.org/maven2/org/scalanlp/breeze-viz_{0}/{1}/breeze-viz_{0}-{1}.jar'.format(
                     '2.11', '0.12'),
-                'http://central.maven.org/maven2/org/scalanlp/breeze-macros_{0}/{1}/breeze-macros_{0}-{1}.jar'.format(
+                'https://repo1.maven.org/maven2/org/scalanlp/breeze-macros_{0}/{1}/breeze-macros_{0}-{1}.jar'.format(
                     '2.11', '0.12'),
-                'http://central.maven.org/maven2/org/scalanlp/breeze-parent_{0}/{1}/breeze-parent_{0}-{1}.jar'.format(
+                'https://repo1.maven.org/maven2/org/scalanlp/breeze-parent_{0}/{1}/breeze-parent_{0}-{1}.jar'.format(
                     '2.11', '0.12'),
-                'http://central.maven.org/maven2/org/jfree/jfreechart/{0}/jfreechart-{0}.jar'.format('1.0.19'),
-                'http://central.maven.org/maven2/org/jfree/jcommon/{0}/jcommon-{0}.jar'.format('1.0.24'),
+                'https://repo1.maven.org/maven2/org/jfree/jfreechart/{0}/jfreechart-{0}.jar'.format('1.0.19'),
+                'https://repo1.maven.org/maven2/org/jfree/jcommon/{0}/jcommon-{0}.jar'.format('1.0.24'),
                 '--no-check-certificate https://brunelvis.org/jar/spark-kernel-brunel-all-{0}.jar'.format('2.3'),
                 'http://archive.apache.org/dist/incubator/toree/0.2.0-incubating/toree-pip/toree-0.2.0.tar.gz',
-                'https://download2.rstudio.org/rstudio-server-{}-amd64.deb'.format(
+                'https://download2.rstudio.org/server/trusty/amd64/rstudio-server-{}-amd64.deb'.format(
                     configuration['notebook_rstudio_version']),
                 'http://us.download.nvidia.com/XFree86/Linux-x86_64/{0}/NVIDIA-Linux-x86_64-{0}.run'.format(
                     configuration['notebook_nvidia_version']),
@@ -1322,7 +1322,7 @@
         ec2_client = boto3.client('ec2', region_name=args.region)
         efs_client = boto3.client('efs', region_name=args.region)
         route53_client = boto3.client('route53')
-    tag_name = args.service_base_name + '-Tag'
+    tag_name = args.service_base_name + '-tag'
     pre_defined_vpc = True
     pre_defined_subnet = True
     pre_defined_sg = True
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/addCustomRepository.groovy b/infrastructure-provisioning/scripts/deploy_repository/templates/addCustomRepository.groovy
index 37d2dd3..204c0eb 100644
--- a/infrastructure-provisioning/scripts/deploy_repository/templates/addCustomRepository.groovy
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/addCustomRepository.groovy
@@ -194,7 +194,7 @@
                         ] as Map
                 ] as Map,
                 proxy: [
-                        remoteUrl: 'http://cran.us.r-project.org',
+                        remoteUrl: 'https://cloud.r-project.org',
                         contentMaxAge: 0,
                         metaDataMaxAge: 0
                 ] as Map,
diff --git a/infrastructure-provisioning/scripts/post-deployment_configuration.py b/infrastructure-provisioning/scripts/post-deployment_configuration.py
new file mode 100644
index 0000000..051258f
--- /dev/null
+++ b/infrastructure-provisioning/scripts/post-deployment_configuration.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+from fabric.api import *
+import argparse
+import requests
+import uuid
+from Crypto.PublicKey import RSA
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--keycloak_realm_name', type=str, default='KEYCLOAK_REALM_NAME', help='Keycloak Realm name')
+    parser.add_argument('--keycloak_auth_server_url', type=str, default='KEYCLOAK_AUTH_SERVER_URL', help='Keycloak auth server URL')
+    parser.add_argument('--keycloak_client_name', type=str, default='KEYCLOAK_CLIENT_NAME', help='Keycloak client name')
+    parser.add_argument('--keycloak_client_secret', type=str, default='KEYCLOAK_CLIENT_SECRET', help='Keycloak client secret')
+    parser.add_argument('--keycloak_user', type=str, default='KEYCLOAK_USER', help='Keycloak user')
+    parser.add_argument('--keycloak_admin_password', type=str, default='KEYCLOAK_ADMIN_PASSWORD',
+                        help='Keycloak admin password')
+    args = parser.parse_args()
+    headers = {
+        'Metadata-Flavor': 'Google',
+    }
+
+    print("Getting cloud and instance parameters")
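+    # The GCE metadata server only answers requests carrying the
+    # 'Metadata-Flavor: Google' header; these endpoints expose the external
+    # IP, instance name, zone and project id of the SSN host.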
+    server_external_ip = requests.get('http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip', headers=headers).text
+    dlab_sbn = requests.get('http://metadata/computeMetadata/v1/instance/name', headers=headers).text
+    dlab_ssn_static_ip_name = dlab_sbn + '-ip'
+    dlab_zone = requests.get('http://metadata/computeMetadata/v1/instance/zone', headers=headers).text.split('/')[-1]
+    dlab_region = '-'.join(dlab_zone.split('-', 2)[:2])
+    deployment_vpcId = local("sudo gcloud compute instances describe {0} --zone {1} --format 'value(networkInterfaces.network)' | sed 's|.*/||'".format(dlab_sbn, dlab_zone), capture=True)
+    deployment_subnetId = local("sudo gcloud compute instances describe {0} --zone {1} --format 'value(networkInterfaces.subnetwork)' | sed 's|.*/||'".format(dlab_sbn, dlab_zone), capture=True)
+    gcp_projectId = requests.get('http://metadata/computeMetadata/v1/project/project-id', headers=headers).text
+    keycloak_redirectUri = 'http://{}'.format(server_external_ip)
+
+    print("Generating SSH key file for dlab-user")
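+    # A fresh 2048-bit RSA keypair: the private key is written to the
+    # dlab-user keys directory, the public half becomes the authorized key.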
+    key = RSA.generate(2048)
+    local("sudo sh -c 'echo \"{}\" > /home/dlab-user/keys/KEY-FILE.pem'".format(key.exportKey('PEM')))
+    local("sudo chmod 600 /home/dlab-user/keys/KEY-FILE.pem")
+    pubkey = key.publickey()
+    local("sudo sh -c 'echo \"{}\" > /home/dlab-user/.ssh/authorized_keys'".format(pubkey.exportKey('OpenSSH')))
+
+    print("Generating MongoDB password")
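+    # Replace the default MongoDB admin password with a random value and
+    # patch it into billing.yml and ssn.yml; if the placeholder is already
+    # gone, the mongo call fails and the password is assumed rotated.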
+    mongo_pwd = uuid.uuid4().hex
+    try:
+        local("sudo echo -e 'db.changeUserPassword(\"admin\", \"{}\")' | mongo dlabdb --port 27017 -u admin -p MONGO_PASSWORD".format(mongo_pwd))
+        local('sudo sed -i "s|MONGO_PASSWORD|{}|g" /opt/dlab/conf/billing.yml'.format(mongo_pwd))
+
+        local('sudo sed -i "s|MONGO_PASSWORD|{}|g" /opt/dlab/conf/ssn.yml'.format(mongo_pwd))
+    except Exception:
+        print('Mongo password was already changed')
+
+    print('Reserving external IP')
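+    # Promote the ephemeral external IP to a static address so it survives
+    # instance restarts; skip the call if such an address already exists.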
+    static_address_exist = local(
+        "sudo gcloud compute addresses list --filter='address={}'".format(server_external_ip), capture=True)
+    if static_address_exist:
+        print('Address is already static')
+    else:
+        local("sudo gcloud compute addresses create {0} --addresses {1} --region {2}".format(dlab_ssn_static_ip_name,
+                                                                                             server_external_ip,
+                                                                                             dlab_region), capture=True)
+
+    print("Overwriting SSN parameters")
+
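+    # The shipped configs contain placeholder tokens (DLAB_SBN, KEYCLOAK_*,
+    # etc.); substitute them in-place with the values discovered above. A
+    # 'default' subnet additionally enables the user_subnets_range option.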
+    if deployment_subnetId == 'default':
+        local('sudo sed -i "s|# user_subnets_range|user_subnets_range|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini')
+
+    local('sudo sed -i "s|DLAB_SBN|{}|g" /opt/dlab/conf/self-service.yml'.format(dlab_sbn))
+    local('sudo sed -i "s|KEYCLOAK_REDIRECTURI|{}|g" /opt/dlab/conf/self-service.yml'.format(keycloak_redirectUri))
+    local('sudo sed -i "s|KEYCLOAK_REALM_NAME|{}|g" /opt/dlab/conf/self-service.yml'.format(args.keycloak_realm_name))
+    local('sudo sed -i "s|KEYCLOAK_AUTH_SERVER_URL|{}|g" /opt/dlab/conf/self-service.yml'.format(
+        args.keycloak_auth_server_url))
+    local('sudo sed -i "s|KEYCLOAK_CLIENT_NAME|{}|g" /opt/dlab/conf/self-service.yml'.format(args.keycloak_client_name))
+    local('sudo sed -i "s|KEYCLOAK_CLIENT_SECRET|{}|g" /opt/dlab/conf/self-service.yml'.format(
+        args.keycloak_client_secret))
+
+    local('sudo sed -i "s|KEYCLOAK_REALM_NAME|{}|g" /opt/dlab/conf/provisioning.yml'.format(args.keycloak_realm_name))
+    local('sudo sed -i "s|KEYCLOAK_AUTH_SERVER_URL|{}|g" /opt/dlab/conf/provisioning.yml'.format(
+        args.keycloak_auth_server_url))
+    local('sudo sed -i "s|KEYCLOAK_CLIENT_NAME|{}|g" /opt/dlab/conf/provisioning.yml'.format(args.keycloak_client_name))
+    local('sudo sed -i "s|KEYCLOAK_CLIENT_SECRET|{}|g" /opt/dlab/conf/provisioning.yml'.format(
+        args.keycloak_client_secret))
+    local('sudo sed -i "s|DLAB_SBN|{}|g" /opt/dlab/conf/provisioning.yml'.format(dlab_sbn))
+    local('sudo sed -i "s|SUBNET_ID|{}|g" /opt/dlab/conf/provisioning.yml'.format(deployment_subnetId))
+    local('sudo sed -i "s|DLAB_REGION|{}|g" /opt/dlab/conf/provisioning.yml'.format(dlab_region))
+    local('sudo sed -i "s|DLAB_ZONE|{}|g" /opt/dlab/conf/provisioning.yml'.format(dlab_zone))
+    local('sudo sed -i "s|SSN_VPC_ID|{}|g" /opt/dlab/conf/provisioning.yml'.format(deployment_vpcId))
+    local('sudo sed -i "s|GCP_PROJECT_ID|{}|g" /opt/dlab/conf/provisioning.yml'.format(gcp_projectId))
+    local('sudo sed -i "s|KEYCLOAK_USER|{}|g" /opt/dlab/conf/provisioning.yml'.format(args.keycloak_user))
+    local('sudo sed -i "s|KEYCLOAK_ADMIN_PASSWORD|{}|g" /opt/dlab/conf/provisioning.yml'.format(
+        args.keycloak_admin_password))
+
+    local('sudo sed -i "s|DLAB_SBN|{}|g" /opt/dlab/conf/billing.yml'.format(dlab_sbn))
+
+    local('sudo sed -i "s|DLAB_SBN|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(dlab_sbn))
+    local('sudo sed -i "s|GCP_PROJECT_ID|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(gcp_projectId))
+    local('sudo sed -i "s|DLAB_REGION|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(dlab_region))
+    local('sudo sed -i "s|DLAB_ZONE|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(dlab_zone))
+    local('sudo sed -i "s|KEYCLOAK_REALM_NAME|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(args.keycloak_realm_name))
+    local('sudo sed -i "s|KEYCLOAK_AUTH_SERVER_URL|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(args.keycloak_auth_server_url))
+    local('sudo sed -i "s|KEYCLOAK_CLIENT_NAME|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(args.keycloak_client_name))
+    local('sudo sed -i "s|KEYCLOAK_CLIENT_SECRET|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(args.keycloak_client_secret))
+    local('sudo sed -i "s|KEYCLOAK_USER|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(args.keycloak_user))
+    local('sudo sed -i "s|KEYCLOAK_ADMIN_PASSWORD|{}|g" /opt/dlab/sources/infrastructure-provisioning/src/general/conf/overwrite.ini'.format(args.keycloak_admin_password))
+
+    print('Generating SSL certificate')
+    keystore_passwd = uuid.uuid4().hex
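+    # Drop the stale SSN keys and aliases, issue a self-signed certificate,
+    # wrap it into a PKCS12 bundle, convert that to the JKS keystore used by
+    # the SSN services, and trust the certificate in the JVM cacerts store.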
+    local('sudo rm /home/dlab-user/keys/ssn*')
+    local('sudo rm /etc/ssl/certs/dlab*')
+    local('sudo keytool -delete -noprompt -trustcacerts -alias ssn -storepass changeit -keystore /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/security/cacerts')
+    local('sudo openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout /etc/ssl/certs/dlab.key -out /etc/ssl/certs/dlab.crt -subj "/C=US/ST=US/L=US/O=dlab/CN=localhost/subjectAltName={0}"'.format(server_external_ip))
+    local('sudo openssl pkcs12 -export -in /etc/ssl/certs/dlab.crt -inkey /etc/ssl/certs/dlab.key -name ssn -out /home/dlab-user/keys/ssn.p12 -password pass:{0}'.format(keystore_passwd))
+    local('sudo keytool -importkeystore -srckeystore /home/dlab-user/keys/ssn.p12 -srcstoretype PKCS12 -alias ssn -destkeystore /home/dlab-user/keys/ssn.keystore.jks -deststorepass {0} -srcstorepass {0}'.format(keystore_passwd))
+    local('sudo keytool -importcert -trustcacerts -alias ssn -file /etc/ssl/certs/dlab.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/security/cacerts')
+    local('sudo sed -i "s|KEYSTORE_PASSWORD|{}|g" /opt/dlab/conf/ssn.yml'.format(keystore_passwd))
+
+    print('Updating Nginx configuration')
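+    # Point the reverse proxy at the external IP, then restart Nginx and
+    # the supervisor-managed DLab services.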
+    local('sudo sed -i "s|SERVER_IP|{}|g" /etc/nginx/conf.d/nginx_proxy.conf'.format(server_external_ip))
+    local('sudo systemctl restart nginx')
+    local('sudo supervisorctl restart all')
+
+    print('Rebuilding docker images')
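+    # Rebuild the provisioning Docker images from the local sources so they
+    # pick up the updated configuration.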
+    local('cd /opt/dlab/sources/infrastructure-provisioning/src/ && sudo docker-build all')
+
+    print('[SUMMARY]')
+    print('Mongo password stored in /opt/dlab/conf/ssn.yml')
+    print('SSH key for dlab-user stored in /home/dlab-user/keys/KEY-FILE.pem')
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py b/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
index c37da53..741ca18 100644
--- a/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
+++ b/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
@@ -41,7 +41,7 @@
 parser.add_argument('--spark_version', type=str, default='')
 parser.add_argument('--hadoop_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -162,7 +162,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/edge/fabfile.py b/infrastructure-provisioning/src/edge/fabfile.py
index edbed71..66a656b 100644
--- a/infrastructure-provisioning/src/edge/fabfile.py
+++ b/infrastructure-provisioning/src/edge/fabfile.py
@@ -45,44 +45,6 @@
         sys.exit(1)
 
 
-#def run():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
-#                                               os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_prepare'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed preparing Edge node.", str(err))
-#        sys.exit(1)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_configure'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed configuring Edge node.", str(err))
-#        sys.exit(1)
-
-
-# Main function for terminating EDGE node and exploratory environment if exists
-#def terminate():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#    try:
-#        local("~/scripts/{}.py".format('edge_terminate'))
-#    except Exception as err:
-#       traceback.print_exc()
-#        append_result("Failed terminating Edge node.", str(err))
-#        sys.exit(1)
-
-
 # Main function for stopping EDGE node
 def stop():
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
@@ -111,41 +73,3 @@
         traceback.print_exc()
         append_result("Failed starting Edge node.", str(err))
         sys.exit(1)
-
-
-#def recreate():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
-#                                               os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_prepare'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed preparing Edge node.", str(err))
-#        sys.exit(1)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_configure'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed configuring Edge node.", str(err))
-#        sys.exit(1)
-
-#def reupload_key():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
-#                                               os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#
-#    try:
-#        local("~/scripts/{}.py".format('reupload_ssh_key'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed to reupload key on Edge node.", str(err))
-#        sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/conf/dlab.ini b/infrastructure-provisioning/src/general/conf/dlab.ini
index 4405373..8ab5f9e 100644
--- a/infrastructure-provisioning/src/general/conf/dlab.ini
+++ b/infrastructure-provisioning/src/general/conf/dlab.ini
@@ -44,10 +44,10 @@
 pypi_mirror = pypi.doubanio.com
 ### Name of own GitLab SSL certificate
 gitlab_certfile = dlab-gitlab.crt
-###Enable or Disable creating image at first time
-# image_enabled =
+### Enable or Disable creating image at first time
+image_enabled = true
 ### Enable or Disable shared images
-# shared_image_enabled =
+# shared_image_enabled = true
 ### CIDR of VPC
 vpc_cidr = '172.31.0.0/16'
 ### CIDR of second VPC
@@ -67,6 +67,20 @@
 billing_tag_key = product
 ### Billing tag value
 billing_tag_value = dlab
+### Enable or disable Step certificates
+stepcerts_enabled = false
+### Step root certificate in base64 format
+# stepcerts_root_ca =
+### Step certificates kid
+# stepcerts_kid =
+### Step certificates kid password
+# stepcerts_kid_password =
+### Step certificates CA URL
+# stepcerts_ca_url =
+### Prefix of the private subnet
+private_subnet_prefix = 24
+### Range of subnets defined by user
+# user_subnets_range = 172.31.0.0/24 - 172.31.50.0/24
 
 #--- [aws] section contains all common parameters related to Amazon ---#
 [aws]
@@ -90,8 +104,6 @@
 # iam_user =
 ### EC2 instance type for notebook
 # notebook_instance_type =
-### Prefix of the private subnet
-private_subnet_prefix = 24
 ### EC2 instance type for SSN
 ssn_instance_size = t2.large
 ### EC2 instance type for EDGE
@@ -128,8 +140,6 @@
 ssn_instance_size = Standard_DS2_v2
 ### Instance type for EDGE
 edge_instance_size = Standard_DS1_v2
-### Prefix of the private subnet
-private_subnet_prefix = 24
 ### Master node size for Data Engine
 # dataengine_master_size =
 ### Slave node size for Data Engine
@@ -153,7 +163,7 @@
 ### Azure region code
 # region_info =
 ### Azure datalake to create
-# datalake_enable =
+datalake_enable = false
 ### Azure login application ID
 # application_id =
 
@@ -177,7 +187,5 @@
 ### GCP ami name based on RedHat conf_os_family for all dlab instances
 redhat_image_name =
-### Prefix of the private subnet
-private_subnet_prefix = 24
 ### Instance type for SSN
 ssn_instance_size = n1-standard-2
 ### Instance type for EDGE
@@ -192,7 +201,7 @@
 ### Elastic IP which will be associated with SSN node
 # elastic_ip =
 ### Version of Docker to be installed on SSN
-docker_version = 17.06.2
+docker_version = 18.06.3
 ### Name of hosted zone for Route53
 # hosted_zone_name =
 ### ID of hosted zone
@@ -216,17 +225,17 @@
 ### Size of the additional volume for notebook instance
 disk_size = 30
 ### Version of Apache Spark to be installed on notebook
-spark_version = 2.3.2
+spark_version = 2.4.4
 ### Version of Apache Hadoop to be installed on notebook
 hadoop_version = 2.7
 ### Version of Jupyter to be installed on notebook
-jupyter_version = 5.7.4
+jupyter_version = 6.0.2
 ### Version of TensorFlow to be installed on notebook
 tensorflow_version = 1.8.0
 ### Version of Zeppelin to be installed on notebook
-zeppelin_version = 0.8.0
+zeppelin_version = 0.8.2
 ### Version of Rstudio to be installed on notebook
-rstudio_version = 1.1.463
+rstudio_version = 1.2.5033
 ### Version of Scala to be installed on notebook
 scala_version = 2.12.8
 ### Version of Livy to be installed on notebook
@@ -271,6 +280,14 @@
 numpy_version = 1.14.3
 ### Apache Ivy version
 ivy_version = 2.4.0
+### Matplotlib version
+matplotlib_version = 2.0.2
+### JupyterLab image
+jupyterlab_image = odahu\\/base-notebook:1.1.0-rc8
+### Superset version
+superset_version = 0.35.1
+### GCS-connector version
+gcs_connector_version = 2.0.1
 
 #--- [emr] section contains all parameters that are using for emr provisioning ---#
 [emr]
@@ -302,7 +319,7 @@
 ### Count of slave nodes for Data Engine
 # instance_count =
 ### Type of notebooks for creating Data Engine from notebook images
-image_notebooks = jupyter,rstudio,zeppelin,tensor,tensor-rstudio,deeplearning
+image_notebooks = jupyter,jupyterlab,rstudio,zeppelin,tensor,tensor-rstudio,deeplearning
 ### Percent of RAM allocated for an operating system
 os_memory = 75
 ### Explicit allocation RAM for an operating system
@@ -333,6 +350,10 @@
 # client_name =
 ### Keycloak client secret
 # client_secret =
+### Keycloak user
+# user =
+### Keycloak user password
+# user_password =
 
 #--- [reverse_proxy] reverse proxy settings ---#
 [reverse_proxy]
diff --git a/infrastructure-provisioning/src/general/files/aws/base_Dockerfile b/infrastructure-provisioning/src/general/files/aws/base_Dockerfile
index ffbe54d..48f5e2d 100644
--- a/infrastructure-provisioning/src/general/files/aws/base_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/aws/base_Dockerfile
@@ -31,7 +31,7 @@
 
 # Install any python dependencies
 RUN pip install -UI pip==9.0.3 && \
-    pip install boto3 backoff fabric==1.14.0 fabvenv awscli argparse ujson jupyter pycrypto
+    pip install boto3 backoff fabric==1.14.0 fabvenv awscli argparse requests ujson jupyter pycrypto
 
 # Configuring ssh for user
 RUN mkdir -p /root/.ssh; echo "Host *" > /root/.ssh/config; \
diff --git a/infrastructure-provisioning/src/general/files/aws/dataengine-service_description.json b/infrastructure-provisioning/src/general/files/aws/dataengine-service_description.json
index 7caee2a..b4e5ba2 100644
--- a/infrastructure-provisioning/src/general/files/aws/dataengine-service_description.json
+++ b/infrastructure-provisioning/src/general/files/aws/dataengine-service_description.json
@@ -24,7 +24,7 @@
     },
   "templates":
   [
-    {"version":"emr-5.12.0", "applications": [{"Name":"Hadoop", "Version": "2.8.3"}, {"Name":"Spark", "Version": "2.2.1"}, {"Name":"Hive", "Version": "2.3.2"}]},
-    {"version":"emr-5.19.0", "applications": [{"Name":"Hadoop", "Version": "2.8.5"}, {"Name":"Spark", "Version": "2.3.2"}, {"Name":"Hive", "Version": "2.3.3"}]}
+    {"version":"emr-5.19.0", "applications": [{"Name":"Hadoop", "Version": "2.8.5"}, {"Name":"Spark", "Version": "2.3.2"}, {"Name":"Hive", "Version": "2.3.3"}]},
+    {"version":"emr-5.28.0", "applications": [{"Name":"Hadoop", "Version": "2.8.5"}, {"Name":"Spark", "Version": "2.4.4"}, {"Name":"Hive", "Version": "2.3.6"}]}
   ]
 }
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/aws/deeplearning_description.json b/infrastructure-provisioning/src/general/files/aws/deeplearning_description.json
index d3e48c3..a2db5ae 100644
--- a/infrastructure-provisioning/src/general/files/aws/deeplearning_description.json
+++ b/infrastructure-provisioning/src/general/files/aws/deeplearning_description.json
@@ -8,10 +8,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Deep Learning  2.2",
+      "template_name": "Deep Learning  2.3",
       "description": "Base image with Deep Learning and Jupyter",
       "environment_type": "exploratory",
-      "version": "deeplearning-2.2",
+      "version": "deeplearning-2.3",
       "vendor": "AWS"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/aws/jupyter_description.json b/infrastructure-provisioning/src/general/files/aws/jupyter_description.json
index 9b14a9f..50dd357 100644
--- a/infrastructure-provisioning/src/general/files/aws/jupyter_description.json
+++ b/infrastructure-provisioning/src/general/files/aws/jupyter_description.json
@@ -19,10 +19,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Jupyter notebook 5.7.4",
+      "template_name": "Jupyter notebook 6.0.2",
       "description": "Base image with Jupyter node creation routines",
       "environment_type": "exploratory",
-      "version": "jupyter_notebook-5.7.4",
+      "version": "jupyter_notebook-6.0.2",
       "vendor": "AWS"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/aws/jupyterlab_Dockerfile b/infrastructure-provisioning/src/general/files/aws/jupyterlab_Dockerfile
new file mode 100644
index 0000000..203809c
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/aws/jupyterlab_Dockerfile
@@ -0,0 +1,51 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+
+FROM docker.dlab-base:latest
+
+ARG OS
+
+COPY jupyterlab/ /root/
+COPY general/scripts/os/* /root/scripts/
+COPY general/scripts/aws/jupyter* /root/scripts/
+COPY general/lib/os/${OS}/notebook_lib.py /usr/lib/python2.7/dlab/notebook_lib.py
+COPY general/templates/os/${OS}/ungit.service /root/templates/
+COPY general/templates/os/notebook_spark-defaults_local.conf /root/templates/
+COPY general/templates/os/pyspark_local_template.json /root/templates/
+COPY general/templates/os/py3spark_local_template.json /root/templates/
+COPY general/templates/os/pyspark_dataengine-service_template.json /root/templates/
+COPY general/templates/os/r_dataengine-service_template.json /root/templates/
+COPY general/templates/os/r_template.json /root/templates/
+COPY general/templates/os/run_template.sh /root/templates/
+COPY general/templates/os/toree_dataengine-service_* /root/templates/
+COPY general/files/os/toree-assembly-0.2.0.jar /root/files/
+COPY general/files/os/toree_kernel.tar.gz /root/files/
+COPY general/templates/os/pyspark_dataengine_template.json /root/templates/
+COPY general/templates/os/r_dataengine_template.json /root/templates/
+COPY general/templates/os/toree_dataengine_template.json /root/templates/
+COPY general/templates/os/inactive.sh /root/templates/
+COPY general/templates/os/inactive.service /root/templates/
+COPY general/templates/os/inactive.timer /root/templates/
+
+RUN chmod a+x /root/fabfile.py; \
+    chmod a+x /root/scripts/*
+
diff --git a/infrastructure-provisioning/src/general/files/aws/jupyterlab_description.json b/infrastructure-provisioning/src/general/files/aws/jupyterlab_description.json
new file mode 100644
index 0000000..487586f
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/aws/jupyterlab_description.json
@@ -0,0 +1,29 @@
+{
+  "exploratory_environment_shapes" :
+  {
+    "For testing" : [
+      {"Size": "XS", "Description": "t2.medium", "Type": "t2.medium","Ram": "4 GB","Cpu": "2"}
+    ],
+    "Memory optimized" : [
+      {"Size": "S", "Description": "r3.xlarge", "Type": "r3.xlarge","Ram": "30.5 GB","Cpu": "4"},
+      {"Size": "M", "Description": "r4.2xlarge", "Type": "r4.2xlarge","Ram": "61 GB","Cpu": "8"},
+      {"Size": "M", "Description": "r3.4xlarge", "Type": "r3.4xlarge","Ram": "122 GB","Cpu": "16"},
+      {"Size": "L", "Description": "r3.8xlarge", "Type": "r3.8xlarge","Ram": "244 GB","Cpu": "32"}
+    ],
+    "Compute optimized": [
+      {"Size": "S", "Description": "c4.large", "Type": "c4.large","Ram": "3.75 GB","Cpu": "2"},
+      {"Size": "M", "Description": "c4.2xlarge", "Type": "c4.2xlarge","Ram": "15.0 GB","Cpu": "8"},
+      {"Size": "L", "Description": "c4.8xlarge", "Type": "c4.8xlarge","Ram": "60.0 GB","Cpu": "36"}
+    ]
+  },
+  "exploratory_environment_versions" :
+  [
+    {
+      "template_name": "JupyterLab 0.35.6",
+      "description": "Base image with JupyterLab node creation routines",
+      "environment_type": "exploratory",
+      "version": "jupyter_lab-0.35.6",
+      "vendor": "AWS"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/aws/project_Dockerfile b/infrastructure-provisioning/src/general/files/aws/project_Dockerfile
index 4fa38da..0c23ae0 100644
--- a/infrastructure-provisioning/src/general/files/aws/project_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/aws/project_Dockerfile
@@ -29,6 +29,8 @@
 COPY general/scripts/aws/edge_* /root/scripts/
 COPY general/lib/os/${OS}/edge_lib.py /usr/lib/python2.7/dlab/edge_lib.py
 COPY general/templates/aws/edge_s3_policy.json /root/templates/edge_s3_policy.json
+COPY general/templates/os/manage_step_certs.sh /root/templates/
+COPY general/templates/os/step-cert-manager.service /root/templates/
 
 RUN chmod a+x /root/fabfile.py; \
     chmod a+x /root/scripts/*
diff --git a/infrastructure-provisioning/src/general/files/aws/rstudio_description.json b/infrastructure-provisioning/src/general/files/aws/rstudio_description.json
index e23e275..f517632 100644
--- a/infrastructure-provisioning/src/general/files/aws/rstudio_description.json
+++ b/infrastructure-provisioning/src/general/files/aws/rstudio_description.json
@@ -19,10 +19,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "RStudio 1.1.463",
+      "template_name": "RStudio 1.2.5033",
       "description": "Base image with RStudio node creation routines",
       "environment_type": "exploratory",
-      "version": "RStudio-1.1.463",
+      "version": "RStudio-1.2.5033",
       "vendor": "AWS"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/aws/ssn_Dockerfile b/infrastructure-provisioning/src/general/files/aws/ssn_Dockerfile
index 7283925..aeef12b 100644
--- a/infrastructure-provisioning/src/general/files/aws/ssn_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/aws/ssn_Dockerfile
@@ -28,6 +28,9 @@
 COPY infrastructure-provisioning/src/general/lib/os/${OS}/ssn_lib.py /usr/lib/python2.7/dlab/ssn_lib.py
 COPY infrastructure-provisioning/src/general/files/aws/ssn_policy.json /root/files/
 COPY infrastructure-provisioning/src/general/templates/aws/jenkins_jobs /root/templates/jenkins_jobs
+COPY infrastructure-provisioning/src/general/templates/os/manage_step_certs.sh /root/templates/
+COPY infrastructure-provisioning/src/general/templates/os/step-cert-manager.service /root/templates/
+COPY infrastructure-provisioning/src/general/templates/os/renew_certificates.sh /root/templates/
 
 RUN chmod a+x /root/fabfile.py; \
     chmod a+x /root/scripts/*
diff --git a/infrastructure-provisioning/src/general/files/aws/zeppelin_description.json b/infrastructure-provisioning/src/general/files/aws/zeppelin_description.json
index 31cb86d..d2b4ae1 100644
--- a/infrastructure-provisioning/src/general/files/aws/zeppelin_description.json
+++ b/infrastructure-provisioning/src/general/files/aws/zeppelin_description.json
@@ -19,10 +19,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Apache Zeppelin 0.8.0",
+      "template_name": "Apache Zeppelin 0.8.2",
       "description": "Base image with Apache Zeppelin node creation routines",
       "environment_type": "exploratory",
-      "version": "zeppelin-0.8.0",
+      "version": "zeppelin-0.8.2",
       "vendor": "AWS"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/azure/base_Dockerfile b/infrastructure-provisioning/src/general/files/azure/base_Dockerfile
index 68ce799..dcf939e 100644
--- a/infrastructure-provisioning/src/general/files/azure/base_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/azure/base_Dockerfile
@@ -31,7 +31,7 @@
 
 # Install any python dependencies
 RUN pip install -UI pip==9.0.3 && \
-    pip install backoff fabric==1.14.0 fabvenv argparse ujson jupyter pycrypto azure==2.0.0 azure-mgmt-authorization pyyaml
+    pip install backoff fabric==1.14.0 fabvenv argparse requests ujson jupyter pycrypto azure==2.0.0 azure-mgmt-authorization pyyaml
 
 # Configuring ssh for user
 RUN mkdir -p /root/.ssh; echo "Host *" > /root/.ssh/config; \
diff --git a/infrastructure-provisioning/src/general/files/azure/deeplearning_description.json b/infrastructure-provisioning/src/general/files/azure/deeplearning_description.json
index 5ad1114..55116fc 100644
--- a/infrastructure-provisioning/src/general/files/azure/deeplearning_description.json
+++ b/infrastructure-provisioning/src/general/files/azure/deeplearning_description.json
@@ -8,10 +8,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Deep Learning  2.2",
+      "template_name": "Deep Learning  2.3",
       "description": "Base image with Deep Learning and Jupyter",
       "environment_type": "exploratory",
-      "version": "deeplearning-2.2",
+      "version": "deeplearning-2.3",
       "vendor": "Azure"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/azure/jupyter_description.json b/infrastructure-provisioning/src/general/files/azure/jupyter_description.json
index 638531e..7b14870 100644
--- a/infrastructure-provisioning/src/general/files/azure/jupyter_description.json
+++ b/infrastructure-provisioning/src/general/files/azure/jupyter_description.json
@@ -15,10 +15,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Jupyter notebook 5.7.4",
+      "template_name": "Jupyter notebook 6.0.2",
       "description": "Base image with Jupyter node creation routines",
       "environment_type": "exploratory",
-      "version": "jupyter_notebook-5.7.4",
+      "version": "jupyter_notebook-6.0.2",
       "vendor": "Azure"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/azure/jupyterlab_Dockerfile b/infrastructure-provisioning/src/general/files/azure/jupyterlab_Dockerfile
new file mode 100644
index 0000000..97739c1
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/azure/jupyterlab_Dockerfile
@@ -0,0 +1,51 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+
+FROM docker.dlab-base:latest
+
+ARG OS
+
+COPY jupyterlab/ /root/
+COPY general/scripts/os/* /root/scripts/
+COPY general/scripts/azure/jupyter* /root/scripts/
+COPY general/lib/os/${OS}/notebook_lib.py /usr/lib/python2.7/dlab/notebook_lib.py
+COPY general/templates/os/${OS}/jupyter-notebook.service /root/templates/
+COPY general/templates/os/${OS}/ungit.service /root/templates/
+COPY general/templates/os/notebook_spark-defaults_local.conf /root/templates/
+COPY general/templates/os/pyspark_local_template.json /root/templates/
+COPY general/templates/os/py3spark_local_template.json /root/templates/
+COPY general/templates/os/r_template.json /root/templates/
+COPY general/templates/os/run_template.sh /root/templates/
+COPY general/files/os/toree-assembly-0.2.0.jar /root/files/
+COPY general/files/os/toree_kernel.tar.gz /root/files/
+COPY general/templates/os/pyspark_dataengine_template.json /root/templates/
+COPY general/templates/os/r_dataengine_template.json /root/templates/
+COPY general/templates/os/toree_dataengine_template.json /root/templates/
+COPY general/templates/os/inactive.sh /root/templates/
+COPY general/templates/os/inactive.service /root/templates/
+COPY general/templates/os/inactive.timer /root/templates/
+COPY general/templates/azure/core-site* /root/templates/
+
+
+RUN chmod a+x /root/fabfile.py; \
+    chmod a+x /root/scripts/*
+
diff --git a/infrastructure-provisioning/src/general/files/azure/jupyterlab_description.json b/infrastructure-provisioning/src/general/files/azure/jupyterlab_description.json
new file mode 100644
index 0000000..739bdcf
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/azure/jupyterlab_description.json
@@ -0,0 +1,25 @@
+{
+  "exploratory_environment_shapes" :
+  {
+    "Memory optimized" : [
+      {"Size": "S", "Description": "Standard_E4s_v3", "Type": "Standard_E4s_v3","Ram": "32 GB","Cpu": "4"},
+      {"Size": "M", "Description": "Standard_E16s_v3", "Type": "Standard_E16s_v3","Ram": "128 GB","Cpu": "16"},
+      {"Size": "L", "Description": "Standard_E32s_v3", "Type": "Standard_E32s_v3","Ram": "256 GB","Cpu": "32"}
+    ],
+    "Compute optimized": [
+      {"Size": "S", "Description": "Standard_F2s", "Type": "Standard_F2s","Ram": "4 GB","Cpu": "2"},
+      {"Size": "M", "Description": "Standard_F8s", "Type": "Standard_F8s","Ram": "16.0 GB","Cpu": "8"},
+      {"Size": "L", "Description": "Standard_F16s", "Type": "Standard_F16s","Ram": "32.0 GB","Cpu": "16"}
+    ]
+  },
+  "exploratory_environment_versions" :
+  [
+    {
+      "template_name": "JupyterLab 0.35.6",
+      "description": "Base image with JupyterLab node creation routines",
+      "environment_type": "exploratory",
+      "version": "jupyter_lab-0.35.6",
+      "vendor": "Azure"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/azure/project_Dockerfile b/infrastructure-provisioning/src/general/files/azure/project_Dockerfile
index 29c80ec..823becc 100644
--- a/infrastructure-provisioning/src/general/files/azure/project_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/azure/project_Dockerfile
@@ -28,6 +28,8 @@
 COPY general/scripts/azure/project_* /root/scripts/
 COPY general/scripts/azure/edge_* /root/scripts/
 COPY general/lib/os/${OS}/edge_lib.py /usr/lib/python2.7/dlab/edge_lib.py
+COPY general/templates/os/manage_step_certs.sh /root/templates/
+COPY general/templates/os/step-cert-manager.service /root/templates/
 
 RUN chmod a+x /root/fabfile.py; \
     chmod a+x /root/scripts/*
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/azure/rstudio_description.json b/infrastructure-provisioning/src/general/files/azure/rstudio_description.json
index 8457197..caf01fb 100644
--- a/infrastructure-provisioning/src/general/files/azure/rstudio_description.json
+++ b/infrastructure-provisioning/src/general/files/azure/rstudio_description.json
@@ -15,10 +15,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "RStudio 1.1.463",
+      "template_name": "RStudio 1.2.5033",
       "description": "Base image with RStudio node creation routines",
       "environment_type": "exploratory",
-      "version": "RStudio-1.1.463",
+      "version": "RStudio-1.2.5033",
       "vendor": "Azure"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/azure/ssn_Dockerfile b/infrastructure-provisioning/src/general/files/azure/ssn_Dockerfile
index b1e87aa..ee9be75 100644
--- a/infrastructure-provisioning/src/general/files/azure/ssn_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/azure/ssn_Dockerfile
@@ -27,6 +27,9 @@
 COPY infrastructure-provisioning/src/general/scripts/azure/ssn_* /root/scripts/
 COPY infrastructure-provisioning/src/general/lib/os/${OS}/ssn_lib.py /usr/lib/python2.7/dlab/ssn_lib.py
 COPY infrastructure-provisioning/src/general/templates/azure/jenkins_jobs /root/templates/jenkins_jobs
+COPY infrastructure-provisioning/src/general/templates/os/manage_step_certs.sh /root/templates/
+COPY infrastructure-provisioning/src/general/templates/os/step-cert-manager.service /root/templates/
+COPY infrastructure-provisioning/src/general/templates/os/renew_certificates.sh /root/templates/
 
 RUN chmod a+x /root/fabfile.py; \
     chmod a+x /root/scripts/*
diff --git a/infrastructure-provisioning/src/general/files/azure/tensor_description.json b/infrastructure-provisioning/src/general/files/azure/tensor_description.json
index 06f67c7..4a71198 100644
--- a/infrastructure-provisioning/src/general/files/azure/tensor_description.json
+++ b/infrastructure-provisioning/src/general/files/azure/tensor_description.json
@@ -8,10 +8,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "TensorFlow 1.8.0",
+      "template_name": "Jupyter with TensorFlow 1.8.0",
       "description": "Base image with TensorFlow and Jupyter node creation routines",
       "environment_type": "exploratory",
-      "version": "tensorflow_gpu-1.4.0",
+      "version": "tensorflow_gpu-1.8.0",
       "vendor": "Azure"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/azure/zeppelin_description.json b/infrastructure-provisioning/src/general/files/azure/zeppelin_description.json
index 499d3ff..5f0e6c3 100644
--- a/infrastructure-provisioning/src/general/files/azure/zeppelin_description.json
+++ b/infrastructure-provisioning/src/general/files/azure/zeppelin_description.json
@@ -15,10 +15,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Apache Zeppelin 0.8.0",
+      "template_name": "Apache Zeppelin 0.8.2",
       "description": "Base image with Apache Zeppelin node creation routines",
       "environment_type": "exploratory",
-      "version": "zeppelin-0.8.0",
+      "version": "zeppelin-0.8.2",
       "vendor": "Azure"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/gcp/dataengine-service_description.json b/infrastructure-provisioning/src/general/files/gcp/dataengine-service_description.json
index b5f337a..1bf4fb6 100644
--- a/infrastructure-provisioning/src/general/files/gcp/dataengine-service_description.json
+++ b/infrastructure-provisioning/src/general/files/gcp/dataengine-service_description.json
@@ -25,7 +25,7 @@
   },
   "templates":
   [
-    {"version":"1.2", "applications": [{"Name":"Hadoop", "Version": "2.8.2"}, {"Name":"Spark", "Version": "2.2.0"}, {"Name":"Hive", "Version": "2.1.1"}]},
-    {"version":"1.3", "applications": [{"Name":"Hadoop", "Version": "2.9.2"}, {"Name":"Spark", "Version": "2.3.2"}, {"Name":"Hive", "Version": "2.3.4"}]}
+    {"version":"1.3", "applications": [{"Name":"Hadoop", "Version": "2.9.2"}, {"Name":"Spark", "Version": "2.3.2"}, {"Name":"Hive", "Version": "2.3.4"}]},
+    {"version":"1.4", "applications": [{"Name":"Hadoop", "Version": "2.9.2"}, {"Name":"Spark", "Version": "2.4.4"}, {"Name":"Hive", "Version": "2.3.6"}]}
   ]
 }
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/gcp/deeplearning_description.json b/infrastructure-provisioning/src/general/files/gcp/deeplearning_description.json
index 0ea1159..080be57 100644
--- a/infrastructure-provisioning/src/general/files/gcp/deeplearning_description.json
+++ b/infrastructure-provisioning/src/general/files/gcp/deeplearning_description.json
@@ -10,10 +10,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Deep Learning  2.2",
+      "template_name": "Deep Learning  2.3",
       "description": "Base image with Deep Learning and Jupyter",
       "environment_type": "exploratory",
-      "version": "deeplearning-2.2",
+      "version": "deeplearning-2.3",
       "vendor": "GCP"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/gcp/jupyter_description.json b/infrastructure-provisioning/src/general/files/gcp/jupyter_description.json
index 7e5d71c..a94ba57 100644
--- a/infrastructure-provisioning/src/general/files/gcp/jupyter_description.json
+++ b/infrastructure-provisioning/src/general/files/gcp/jupyter_description.json
@@ -23,10 +23,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Jupyter notebook 5.7.4",
+      "template_name": "Jupyter notebook 6.0.2",
       "description": "Base image with jupyter node creation routines",
       "environment_type": "exploratory",
-      "version": "jupyter_notebook-5.7.4",
+      "version": "jupyter_notebook-6.0.2",
       "vendor": "GCP"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/gcp/jupyterlab_Dockerfile b/infrastructure-provisioning/src/general/files/gcp/jupyterlab_Dockerfile
new file mode 100644
index 0000000..4d68e2f
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/gcp/jupyterlab_Dockerfile
@@ -0,0 +1,51 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+
+FROM docker.dlab-base:latest
+
+ARG OS
+
+COPY jupyterlab/ /root/
+COPY general/scripts/os/* /root/scripts/
+COPY general/scripts/gcp/jupyter* /root/scripts/
+COPY general/lib/os/${OS}/notebook_lib.py /usr/lib/python2.7/dlab/notebook_lib.py
+COPY general/templates/os/${OS}/ungit.service /root/templates/
+COPY general/templates/os/notebook_spark-defaults_local.conf /root/templates/
+COPY general/templates/os/pyspark_local_template.json /root/templates/
+COPY general/templates/os/py3spark_local_template.json /root/templates/
+COPY general/templates/os/pyspark_dataengine-service_template.json /root/templates/
+COPY general/templates/os/r_dataengine-service_template.json /root/templates/
+COPY general/templates/os/r_template.json /root/templates/
+COPY general/templates/os/run_template.sh /root/templates/
+COPY general/templates/os/toree_dataengine-service_* /root/templates/
+COPY general/files/os/toree-assembly-0.2.0.jar /root/files/
+COPY general/files/os/toree_kernel.tar.gz /root/files/
+COPY general/templates/os/pyspark_dataengine_template.json /root/templates/
+COPY general/templates/os/r_dataengine_template.json /root/templates/
+COPY general/templates/os/toree_dataengine_template.json /root/templates/
+COPY general/templates/os/inactive.sh /root/templates/
+COPY general/templates/os/inactive.service /root/templates/
+COPY general/templates/os/inactive.timer /root/templates/
+
+RUN chmod a+x /root/fabfile.py; \
+    chmod a+x /root/scripts/*
+
diff --git a/infrastructure-provisioning/src/general/files/gcp/jupyterlab_description.json b/infrastructure-provisioning/src/general/files/gcp/jupyterlab_description.json
new file mode 100644
index 0000000..3f202f7
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/gcp/jupyterlab_description.json
@@ -0,0 +1,33 @@
+{
+  "exploratory_environment_shapes" :
+  {
+    "For testing" : [
+      {"Size": "S", "Description": "n1-standard-2", "Type": "n1-standard-2","Ram": "7.50 GB","Cpu": "2"}
+    ],
+    "Memory optimized" : [
+      {"Size": "S", "Description": "n1-highmem-4", "Type": "n1-highmem-4","Ram": "26 GB","Cpu": "4"},
+      {"Size": "M", "Description": "n1-highmem-16", "Type": "n1-highmem-16","Ram": "104 GB","Cpu": "16"},
+      {"Size": "L", "Description": "n1-highmem-32", "Type": "n1-highmem-32","Ram": "208 GB","Cpu": "32"}
+    ],
+    "GPU optimized": [
+      {"Size": "S", "Description": "n1-standard-2", "Type": "n1-standard-2","Ram": "7.50 GB","Cpu": "2"},
+      {"Size": "M", "Description": "n1-highcpu-8", "Type": "n1-highcpu-8","Ram": "7.20 GB","Cpu": "8"},
+      {"Size": "L", "Description": "n1-highmem-32", "Type": "n1-highmem-32","Ram": "208 GB","Cpu": "32"}
+    ],
+    "Compute optimized": [
+      {"Size": "S", "Description": "n1-highcpu-2", "Type": "n1-highcpu-2","Ram": "1.80 GB","Cpu": "2"},
+      {"Size": "M", "Description": "n1-highcpu-8", "Type": "n1-highcpu-8","Ram": "7.20 GB","Cpu": "8"},
+      {"Size": "L", "Description": "n1-highcpu-32", "Type": "n1-highcpu-32","Ram": "28.8 GB","Cpu": "32"}
+    ]
+  },
+  "exploratory_environment_versions" :
+  [
+    {
+      "template_name": "JupyterLab 0.35.6",
+      "description": "Base image with JupyterLab node creation routines",
+      "environment_type": "exploratory",
+      "version": "jupyter_lab-0.35.6",
+      "vendor": "GCP"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/gcp/project_Dockerfile b/infrastructure-provisioning/src/general/files/gcp/project_Dockerfile
index fb9ecde..7fc44e5 100644
--- a/infrastructure-provisioning/src/general/files/gcp/project_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/gcp/project_Dockerfile
@@ -30,6 +30,8 @@
 COPY general/lib/os/${OS}/edge_lib.py /usr/lib/python2.7/dlab/edge_lib.py
 COPY general/files/gcp/ps_policy.json /root/files/ps_policy.json
 COPY general/files/gcp/ps_roles.json /root/files/ps_roles.json
+COPY general/templates/os/manage_step_certs.sh /root/templates/
+COPY general/templates/os/step-cert-manager.service /root/templates/
 
 RUN chmod a+x /root/fabfile.py; \
     chmod a+x /root/scripts/*
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/gcp/rstudio_description.json b/infrastructure-provisioning/src/general/files/gcp/rstudio_description.json
index ca8cfb7..27c2771 100644
--- a/infrastructure-provisioning/src/general/files/gcp/rstudio_description.json
+++ b/infrastructure-provisioning/src/general/files/gcp/rstudio_description.json
@@ -23,10 +23,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "RStudio 1.1.463",
+      "template_name": "RStudio 1.2.5033",
       "description": "Base image with rstudio node creation routines",
       "environment_type": "exploratory",
-      "version": "RStudio-1.1.463",
+      "version": "RStudio-1.2.5033",
       "vendor": "GCP"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/gcp/ssn_Dockerfile b/infrastructure-provisioning/src/general/files/gcp/ssn_Dockerfile
index 152e35d..902f48c 100644
--- a/infrastructure-provisioning/src/general/files/gcp/ssn_Dockerfile
+++ b/infrastructure-provisioning/src/general/files/gcp/ssn_Dockerfile
@@ -29,6 +29,9 @@
 COPY infrastructure-provisioning/src/general/files/gcp/ssn_policy.json /root/files/
 COPY infrastructure-provisioning/src/general/files/gcp/ssn_roles.json /root/files/
 COPY infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs /root/templates/jenkins_jobs
+COPY infrastructure-provisioning/src/general/templates/os/manage_step_certs.sh /root/templates/
+COPY infrastructure-provisioning/src/general/templates/os/step-cert-manager.service /root/templates/
+COPY infrastructure-provisioning/src/general/templates/os/renew_certificates.sh /root/templates/
 
 RUN chmod a+x /root/fabfile.py; \
     chmod a+x /root/scripts/*
diff --git a/infrastructure-provisioning/src/general/files/gcp/superset_Dockerfile b/infrastructure-provisioning/src/general/files/gcp/superset_Dockerfile
new file mode 100644
index 0000000..224482c
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/gcp/superset_Dockerfile
@@ -0,0 +1,42 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+
+FROM docker.dlab-base:latest
+
+ARG OS
+
+COPY superset/ /root/
+COPY general/scripts/os/* /root/scripts/
+COPY general/scripts/gcp/superset_* /root/scripts/
+COPY general/lib/os/${OS}/notebook_lib.py /usr/lib/python2.7/dlab/notebook_lib.py
+COPY general/templates/os/${OS}/superset-notebook.service /root/templates/
+COPY general/templates/os/${OS}/ungit.service /root/templates/
+
+COPY general/templates/os/inactive.sh /root/templates/
+COPY general/templates/os/inactive.service /root/templates/
+COPY general/templates/os/inactive.timer /root/templates/
+
+COPY general/templates/gcp/core-site.xml /root/templates/
+
+RUN chmod a+x /root/fabfile.py; \
+    chmod a+x /root/scripts/*
+
diff --git a/infrastructure-provisioning/src/general/files/gcp/superset_description.json b/infrastructure-provisioning/src/general/files/gcp/superset_description.json
new file mode 100644
index 0000000..98f394f
--- /dev/null
+++ b/infrastructure-provisioning/src/general/files/gcp/superset_description.json
@@ -0,0 +1,28 @@
+{
+  "exploratory_environment_shapes" :
+  {
+    "For testing" : [
+      {"Size": "S", "Description": "n1-standard-2", "Type": "n1-standard-2","Ram": "7.50 GB","Cpu": "2"}
+    ],
+    "Memory optimized" : [
+      {"Size": "S", "Description": "n1-highmem-4", "Type": "n1-highmem-4","Ram": "26 GB","Cpu": "4"},
+      {"Size": "M", "Description": "n1-highmem-16", "Type": "n1-highmem-16","Ram": "104 GB","Cpu": "16"},
+      {"Size": "L", "Description": "n1-highmem-32", "Type": "n1-highmem-32","Ram": "208 GB","Cpu": "32"}
+    ],
+    "Compute optimized": [
+      {"Size": "S", "Description": "n1-highcpu-2", "Type": "n1-highcpu-2","Ram": "1.80 GB","Cpu": "2"},
+      {"Size": "M", "Description": "n1-highcpu-8", "Type": "n1-highcpu-8","Ram": "7.20 GB","Cpu": "8"},
+      {"Size": "L", "Description": "n1-highcpu-32", "Type": "n1-highcpu-32","Ram": "28.8 GB","Cpu": "32"}
+    ]
+  },
+  "exploratory_environment_versions" :
+  [
+    {
+      "template_name": "Superset 0.35.1",
+      "description": "Base image with superset node creation routines",
+      "environment_type": "exploratory",
+      "version": "superset-0.34",
+      "vendor": "GCP"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/files/gcp/zeppelin_description.json b/infrastructure-provisioning/src/general/files/gcp/zeppelin_description.json
index 44b7c61..159c14e 100644
--- a/infrastructure-provisioning/src/general/files/gcp/zeppelin_description.json
+++ b/infrastructure-provisioning/src/general/files/gcp/zeppelin_description.json
@@ -23,10 +23,10 @@
   "exploratory_environment_versions" :
   [
     {
-      "template_name": "Apache Zeppelin 0.8.0",
+      "template_name": "Apache Zeppelin 0.8.2",
       "description": "Base image with Apache Zeppelin node creation routines",
       "environment_type": "exploratory",
-      "version": "zeppelin-0.8.0",
+      "version": "zeppelin-0.8.2",
       "vendor": "GCP"
     }
   ]
diff --git a/infrastructure-provisioning/src/general/files/os/local_endpoint.json b/infrastructure-provisioning/src/general/files/os/local_endpoint.json
index 67f9a54..190a47e 100644
--- a/infrastructure-provisioning/src/general/files/os/local_endpoint.json
+++ b/infrastructure-provisioning/src/general/files/os/local_endpoint.json
@@ -3,6 +3,8 @@
         "name" : "DEF_ENDPOINT_NAME",
         "url" : "https://localhost:8084/",
         "account" : "DEF_ENDPOINT_NAME",
-        "endpoint_tag" : "DEF_ENDPOINT_NAME"
+        "endpoint_tag" : "DEF_ENDPOINT_NAME",
+        "cloudProvider" : "CLOUD_PROVIDER",
+        "status" : "ACTIVE"
     }
 ]
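Note: the upper-case values in local_endpoint.json (DEF_ENDPOINT_NAME, CLOUD_PROVIDER) are placeholders substituted at deployment time. An illustrative substitution sketch; the concrete values below are examples, not defaults taken from the patch:

    with open('local_endpoint.json') as f:
        endpoint_json = f.read()

    # Example values only; real ones come from the deployment configuration.
    endpoint_json = (endpoint_json.replace('DEF_ENDPOINT_NAME', 'local')
                                  .replace('CLOUD_PROVIDER', 'gcp'))

    with open('local_endpoint.json', 'w') as f:
        f.write(endpoint_json)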
diff --git a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
index 8ab9b35..9519053 100644
--- a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
@@ -66,7 +66,7 @@
         return False
 
 
-def create_s3_bucket(bucket_name, tag, region, bucket_name_tag):
+def create_s3_bucket(bucket_name, bucket_tags, region, bucket_name_tag):
     try:
         s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
         if region == "us-east-1":
@@ -84,18 +84,15 @@
                 ]
             })
         tags = list()
-        tags.append(tag)
         tags.append({'Key': os.environ['conf_tag_resource_id'],
                      'Value': os.environ['conf_service_base_name'] + ':' + bucket_name_tag})
-        tags.append({'Key': os.environ['conf_billing_tag_key'], 'Value': os.environ['conf_billing_tag_value']})
-        if 'conf_additional_tags' in os.environ:
-            for tag in os.environ['conf_additional_tags'].split(';'):
-                tags.append(
-                    {
-                        'Key': tag.split(':')[0],
-                        'Value': tag.split(':')[1]
-                    }
-                )
+        for tag in bucket_tags.split(','):
+            tags.append(
+                {
+                    'Key': tag.split(':')[0],
+                    'Value': tag.split(':')[1]
+                }
+            )
         tagging = bucket.Tagging()
         tagging.put(Tagging={'TagSet': tags})
         tagging.reload()
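Note: after this change create_s3_bucket() no longer assembles the billing and additional tags itself; the caller passes a single bucket_tags string. Judging from the parsing loop above, the expected format is a comma-separated list of Key:Value pairs. An illustrative sketch (the tag names are hypothetical):

    bucket_tags = 'Name:dlab-prj-bucket,dlab_billing:dlab,project_tag:prj'

    tags = [{'Key': pair.split(':')[0], 'Value': pair.split(':')[1]}
            for pair in bucket_tags.split(',')]

A value containing an extra ':' would be truncated by split(':')[1]; pair.split(':', 1)[1] would be the safer variant.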
@@ -309,10 +306,10 @@
     client = boto3.client('ec2')
     try:
         route_tables = client.describe_route_tables(
-            Filters=[{'Name': 'tag:{}-Tag'.format(service_base_name), 'Values': ['{}'.format(
+            Filters=[{'Name': 'tag:{}-tag'.format(service_base_name), 'Values': ['{}'.format(
                 service_base_name)]}]).get('RouteTables')
         route_tables2 = client.describe_route_tables(Filters=[
-            {'Name': 'tag:{}-secondary-Tag'.format(service_base_name), 'Values': ['{}'.format(
+            {'Name': 'tag:{}-secondary-tag'.format(service_base_name), 'Values': ['{}'.format(
                 service_base_name)]}]).get('RouteTables')
         for table in route_tables:
             routes = table.get('Routes')
@@ -347,7 +344,7 @@
     try:
         ec2 = boto3.resource('ec2')
         client = boto3.client('ec2')
-        tag = {"Key": service_base_name + '-Tag', "Value": service_base_name}
+        tag = {"Key": service_base_name + '-tag', "Value": service_base_name}
         tag_name = {"Key": 'Name', "Value": "{0}-peering-connection".format(service_base_name)}
         peering = ec2.create_vpc_peering_connection(PeerVpcId=vpc_id, VpcId=vpc2_id)
         client.accept_vpc_peering_connection(VpcPeeringConnectionId=peering.id)
@@ -513,7 +510,7 @@
         cluster = client.list_instances(ClusterId=cluster_id)
         instances = cluster['Instances']
         for instance in instances:
-            instance_tag = {'Key': os.environ['conf_service_base_name'] + '-Tag',
+            instance_tag = {'Key': os.environ['conf_service_base_name'] + '-tag',
                             'Value': node_name}
             tag_intance_volume(instance['Ec2InstanceId'], node_name, instance_tag)
     except Exception as err:
@@ -891,37 +888,42 @@
         traceback.print_exc(file=sys.stdout)
 
 
-def remove_all_iam_resources(instance_type, scientist=''):
+def remove_all_iam_resources(instance_type, project_name='', endpoint_name=''):
     try:
         client = boto3.client('iam')
-        service_base_name = os.environ['conf_service_base_name'].lower().replace('-', '_')
+        service_base_name = os.environ['conf_service_base_name']
         roles_list = []
+        if project_name:
+            start_prefix = '{}-{}-{}-'.format(service_base_name, project_name, endpoint_name)
+        else:
+            start_prefix = '{}-'.format(service_base_name)
         for item in client.list_roles(MaxItems=250).get("Roles"):
-            if item.get("RoleName").startswith(service_base_name + '-'):
+            if item.get("RoleName").startswith(start_prefix):
                 roles_list.append(item.get('RoleName'))
         if roles_list:
             roles_list.sort(reverse=True)
             for iam_role in roles_list:
-                if '-ssn-Role' in iam_role and instance_type == 'ssn' or instance_type == 'all':
+                if '-ssn-role' in iam_role and instance_type == 'ssn' or instance_type == 'all':
                     try:
-                        client.delete_role_policy(RoleName=iam_role, PolicyName='{0}-ssn-Policy'.format(
+                        client.delete_role_policy(RoleName=iam_role, PolicyName='{0}-ssn-policy'.format(
                             service_base_name))
                     except:
-                        print('There is no policy {}-ssn-Policy to delete'.format(service_base_name))
+                        print('There is no policy {}-ssn-policy to delete'.format(service_base_name))
                     role_profiles = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
                     if role_profiles:
                         for i in role_profiles:
                             role_profile_name = i.get('InstanceProfileName')
-                            if role_profile_name == '{0}-ssn-Profile'.format(service_base_name):
+                            if role_profile_name == '{0}-ssn-profile'.format(service_base_name):
                                 remove_roles_and_profiles(iam_role, role_profile_name)
                     else:
                         print("There is no instance profile for {}".format(iam_role))
                         client.delete_role(RoleName=iam_role)
                         print("The IAM role {} has been deleted successfully".format(iam_role))
-                if '-edge-Role' in iam_role:
-                    if instance_type == 'edge' and scientist in iam_role:
+                if '-edge-role' in iam_role:
+                    if instance_type == 'edge' and project_name in iam_role:
                         remove_detach_iam_policies(iam_role, 'delete')
-                        role_profile_name = '{0}-{1}-edge-Profile'.format(service_base_name, scientist)
+                        role_profile_name = '{0}-{1}-{2}-edge-profile'.format(service_base_name, project_name,
+                                                                              os.environ['endpoint_name'].lower())
                         try:
                             client.get_instance_profile(InstanceProfileName=role_profile_name)
                             remove_roles_and_profiles(iam_role, role_profile_name)
@@ -941,10 +943,11 @@
                             print("There is no instance profile for {}".format(iam_role))
                             client.delete_role(RoleName=iam_role)
                             print("The IAM role {} has been deleted successfully".format(iam_role))
-                if '-nb-de-Role' in iam_role:
-                    if instance_type == 'notebook' and scientist in iam_role:
+                if '-nb-de-role' in iam_role:
+                    if instance_type == 'notebook' and project_name in iam_role:
                         remove_detach_iam_policies(iam_role)
-                        role_profile_name = '{0}-{1}-nb-de-Profile'.format(service_base_name, scientist)
+                        role_profile_name = '{0}-{1}-{2}-nb-de-profile'.format(service_base_name, project_name,
+                                                                               os.environ['endpoint_name'].lower())
                         try:
                             client.get_instance_profile(InstanceProfileName=role_profile_name)
                             remove_roles_and_profiles(iam_role, role_profile_name)
@@ -968,22 +971,22 @@
             print("There are no IAM roles to delete. Checking instance profiles...")
         profile_list = []
         for item in client.list_instance_profiles(MaxItems=250).get("InstanceProfiles"):
-            if item.get("InstanceProfileName").startswith('{}-'.format(service_base_name)):
+            if item.get("InstanceProfileName").startswith(start_prefix):
                 profile_list.append(item.get('InstanceProfileName'))
         if profile_list:
             for instance_profile in profile_list:
-                if '-ssn-Profile' in instance_profile and instance_type == 'ssn' or instance_type == 'all':
+                if '-ssn-profile' in instance_profile and instance_type == 'ssn' or instance_type == 'all':
                     client.delete_instance_profile(InstanceProfileName=instance_profile)
                     print("The instance profile {} has been deleted successfully".format(instance_profile))
-                if '-edge-Profile' in instance_profile:
-                    if instance_type == 'edge' and scientist in instance_profile:
+                if '-edge-profile' in instance_profile:
+                    if instance_type == 'edge' and project_name in instance_profile:
                         client.delete_instance_profile(InstanceProfileName=instance_profile)
                         print("The instance profile {} has been deleted successfully".format(instance_profile))
                     if instance_type == 'all':
                         client.delete_instance_profile(InstanceProfileName=instance_profile)
                         print("The instance profile {} has been deleted successfully".format(instance_profile))
-                if '-nb-de-Profile' in instance_profile:
-                    if instance_type == 'notebook' and scientist in instance_profile:
+                if '-nb-de-profile' in instance_profile:
+                    if instance_type == 'notebook' and project_name in instance_profile:
                         client.delete_instance_profile(InstanceProfileName=instance_profile)
                         print("The instance profile {} has been deleted successfully".format(instance_profile))
                     if instance_type == 'all':
@@ -1037,7 +1040,7 @@
             if bucket_name in item.get('Name'):
                 for i in client.get_bucket_tagging(Bucket=item.get('Name')).get('TagSet'):
                     i.get('Key')
-                    if i.get('Key') == os.environ['conf_service_base_name'] + '-Tag':
+                    if i.get('Key') == os.environ['conf_service_base_name'].lower() + '-tag':
                         bucket_list.append(item.get('Name'))
         for s3bucket in bucket_list:
             if s3bucket:
@@ -1060,8 +1063,8 @@
     try:
         ec2 = boto3.resource('ec2')
         client = boto3.client('ec2')
-        tag_name = os.environ['conf_service_base_name'] + '-Tag'
-        tag2_name = os.environ['conf_service_base_name'] + '-secondary-Tag'
+        tag_name = os.environ['conf_service_base_name'].lower() + '-tag'
+        tag2_name = os.environ['conf_service_base_name'].lower() + '-secondary-tag'
         subnets = ec2.subnets.filter(
             Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]}])
         subnets2 = ec2.subnets.filter(
@@ -1087,7 +1090,7 @@
 def remove_peering(tag_value):
     try:
         client = boto3.client('ec2')
-        tag_name = os.environ['conf_service_base_name'] + '-Tag'
+        tag_name = os.environ['conf_service_base_name'].lower() + '-tag'
         if os.environ['conf_duo_vpc_enable'] == 'true':
             peering_id = client.describe_vpc_peering_connections(Filters=[
                 {'Name': 'tag-key', 'Values': [tag_name]},
@@ -1172,8 +1175,7 @@
         resource = boto3.resource('ec2')
         client = boto3.client('ec2')
         for image in resource.images.filter(
-                Filters=[{'Name': 'name', 'Values': ['{}-*'.format(os.environ['conf_service_base_name'])]},
-                         {'Name': 'tag-value', 'Values': [os.environ['conf_service_base_name']]},
+                Filters=[{'Name': 'tag-value', 'Values': [os.environ['conf_service_base_name']]},
                          {'Name': 'tag-value', 'Values': [image_name]}]):
             client.deregister_image(ImageId=image.id)
             for device in image.block_device_mappings:
@@ -1352,11 +1354,14 @@
                 local("echo Waiting for image creation; sleep 20")
                 image.load()
             tag = {'Key': 'Name', 'Value': image_name}
+            sbn_tag = {'Key': 'SBN', 'Value': os.environ['conf_service_base_name']}
             response = client.describe_images(ImageIds=[image.id]).get('Images')[0].get('BlockDeviceMappings')
             for ebs in response:
                 if ebs.get('Ebs'):
                     snapshot_id = ebs.get('Ebs').get('SnapshotId')
+                    create_tag(snapshot_id, sbn_tag)
                     create_tag(snapshot_id, tag)
+            create_tag(image.id, sbn_tag)
             create_tag(image.id, tag)
             if tags:
                 all_tags = json.loads(tags)
@@ -1587,8 +1592,8 @@
                     {1}hadoop-aws-{0}.jar'.format('2.7.4', jars_dir))
             sudo('wget https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk/{0}/aws-java-sdk-{0}.jar -O \
                     {1}aws-java-sdk-{0}.jar'.format('1.7.4', jars_dir))
-            #sudo('wget https://maven.twttr.com/com/hadoop/gplcompression/hadoop-lzo/{0}/hadoop-lzo-{0}.jar -O \
-            #        {1}hadoop-lzo-{0}.jar'.format('0.4.20', jars_dir))
+            # sudo('wget https://maven.twttr.com/com/hadoop/gplcompression/hadoop-lzo/{0}/hadoop-lzo-{0}.jar -O \
+            #         {1}hadoop-lzo-{0}.jar'.format('0.4.20', jars_dir))
             sudo('touch /home/{}/.ensure_dir/local_jars_ensured'.format(os_user))
         except:
             sys.exit(1)
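Note: the image-creation hunk above now tags both the AMI and each backing snapshot with an SBN (service base name) tag via the project's create_tag() helper. A plain boto3 equivalent, for illustration only (resource IDs and the SBN value are placeholders):

    import boto3

    client = boto3.client('ec2')
    sbn_tag = {'Key': 'SBN', 'Value': 'dlab'}  # conf_service_base_name; example value
    client.create_tags(Resources=['ami-0123456789abcdef0', 'snap-0123456789abcdef0'],
                       Tags=[sbn_tag, {'Key': 'Name', 'Value': 'my-image'}])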
diff --git a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
index 53d9eb5..8cac3c4 100644
--- a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
@@ -521,7 +521,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-ssn-disk0'.format(service_base_name),
+                            'name': '{}-ssn-volume-primary'.format(service_base_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -531,7 +531,7 @@
                         }
                     },
                     'os_profile': {
-                        'computer_name': instance_name,
+                        'computer_name': instance_name.replace('_', '-'),
                         'admin_username': dlab_ssh_user_name,
                         'linux_configuration': {
                             'disable_password_authentication': True,
@@ -568,7 +568,8 @@
                             },
                             'os_disk': {
                                 'os_type': 'Linux',
-                                'name': '{}-{}-edge-disk0'.format(service_base_name, project_name),
+                                'name': '{}-{}-{}-edge-volume-primary'.format(service_base_name, project_name,
+                                                                              os.environ['endpoint_name'].lower()),
                                 'create_option': create_option,
                                 'disk_size_gb': int(primary_disk_size),
                                 'tags': tags,
@@ -578,7 +579,7 @@
                             }
                         },
                         'os_profile': {
-                            'computer_name': instance_name,
+                            'computer_name': instance_name.replace('_', '-'),
                             'admin_username': dlab_ssh_user_name,
                             'linux_configuration': {
                                 'disable_password_authentication': True,
@@ -608,7 +609,8 @@
                         'storage_profile': {
                             'os_disk': {
                                 'os_type': 'Linux',
-                                'name': '{}-{}-edge-disk0'.format(service_base_name, project_name),
+                                'name': '{}-{}-{}-edge-volume-primary'.format(service_base_name, project_name,
+                                                                              os.environ['endpoint_name'].lower()),
                                 'create_option': create_option,
                                 'disk_size_gb': int(primary_disk_size),
                                 'tags': tags,
@@ -637,7 +639,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -648,11 +650,11 @@
                         'data_disks': [
                             {
                                 'lun': 1,
-                                'name': '{}-disk1'.format(instance_name),
+                                'name': '{}-volume-secondary'.format(instance_name),
                                 'create_option': 'empty',
                                 'disk_size_gb': 32,
                                 'tags': {
-                                    'Name': '{}-disk1'.format(instance_name)
+                                    'Name': '{}-volume-secondary'.format(instance_name)
                                 },
                                 'managed_disk': {
                                     'storage_account_type': instance_storage_account_type
@@ -667,7 +669,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -684,7 +686,7 @@
                     },
                     'storage_profile': storage_profile,
                     'os_profile': {
-                        'computer_name': instance_name,
+                        'computer_name': instance_name.replace('_', '-'),
                         'admin_username': dlab_ssh_user_name,
                         'linux_configuration': {
                             'disable_password_authentication': True,
@@ -712,7 +714,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -731,7 +733,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -748,7 +750,7 @@
                     },
                     'storage_profile': storage_profile,
                     'os_profile': {
-                        'computer_name': instance_name,
+                        'computer_name': instance_name.replace('_', '-'),
                         'admin_username': dlab_ssh_user_name,
                         'linux_configuration': {
                             'disable_password_authentication': True,
@@ -1047,18 +1049,18 @@
             print("Downloading local jars for Azure")
             sudo('mkdir -p {}'.format(jars_dir))
             if os.environ['azure_datalake_enable'] == 'false':
-                sudo('wget http://central.maven.org/maven2/org/apache/hadoop/hadoop-azure/{0}/hadoop-azure-{0}.jar -O \
+                sudo('wget https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-azure/{0}/hadoop-azure-{0}.jar -O \
                                  {1}hadoop-azure-{0}.jar'.format(hadoop_version, jars_dir))
-                sudo('wget http://central.maven.org/maven2/com/microsoft/azure/azure-storage/{0}/azure-storage-{0}.jar \
+                sudo('wget https://repo1.maven.org/maven2/com/microsoft/azure/azure-storage/{0}/azure-storage-{0}.jar \
                     -O {1}azure-storage-{0}.jar'.format('2.2.0', jars_dir))
             else:
-                sudo('wget http://central.maven.org/maven2/org/apache/hadoop/hadoop-azure/{0}/hadoop-azure-{0}.jar -O \
+                sudo('wget https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-azure/{0}/hadoop-azure-{0}.jar -O \
                                  {1}hadoop-azure-{0}.jar'.format('3.0.0', jars_dir))
-                sudo('wget http://central.maven.org/maven2/com/microsoft/azure/azure-storage/{0}/azure-storage-{0}.jar \
+                sudo('wget https://repo1.maven.org/maven2/com/microsoft/azure/azure-storage/{0}/azure-storage-{0}.jar \
                                     -O {1}azure-storage-{0}.jar'.format('6.1.0', jars_dir))
-                sudo('wget http://central.maven.org/maven2/com/microsoft/azure/azure-data-lake-store-sdk/{0}/azure-data-lake-store-sdk-{0}.jar \
+                sudo('wget https://repo1.maven.org/maven2/com/microsoft/azure/azure-data-lake-store-sdk/{0}/azure-data-lake-store-sdk-{0}.jar \
                     -O {1}azure-data-lake-store-sdk-{0}.jar'.format('2.2.3', jars_dir))
-                sudo('wget http://central.maven.org/maven2/org/apache/hadoop/hadoop-azure-datalake/{0}/hadoop-azure-datalake-{0}.jar \
+                sudo('wget https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-azure-datalake/{0}/hadoop-azure-datalake-{0}.jar \
                     -O {1}hadoop-azure-datalake-{0}.jar'.format('3.0.0', jars_dir))
             if os.environ['application'] == 'tensor' or os.environ['application'] == 'deeplearning':
                 sudo('wget https://dl.bintray.com/spark-packages/maven/tapanalyticstoolkit/spark-tensorflow-connector/{0}/spark-tensorflow-connector-{0}.jar \
@@ -1082,19 +1084,20 @@
                 spark_jars_paths = sudo('cat /opt/spark/conf/spark-defaults.conf | grep -e "^spark.jars " ')
             except:
                 spark_jars_paths = None
-        user_storage_account_tag = os.environ['conf_service_base_name'] + '-' + (os.environ['project_name'].lower().replace('_', '-')).\
-            replace('_', '-') + '-' + os.environ['endpoint_name'].lower().replace('_', '-') + '-storage'
-        shared_storage_account_tag = '{0}-{1}-shared-storage'.format(os.environ['conf_service_base_name'],
-                                                                     os.environ['endpoint_name'])
+        user_storage_account_tag = "{}-{}-{}-bucket".format(os.environ['conf_service_base_name'],
+                                                            os.environ['project_name'].lower(),
+                                                            os.environ['endpoint_name'].lower())
+        shared_storage_account_tag = '{0}-{1}-shared-bucket'.format(os.environ['conf_service_base_name'],
+                                                                    os.environ['endpoint_name'].lower())
         for storage_account in meta_lib.AzureMeta().list_storage_accounts(os.environ['azure_resource_group_name']):
             if user_storage_account_tag == storage_account.tags["Name"]:
                 user_storage_account_name = storage_account.name
-                user_storage_account_key = meta_lib.AzureMeta().list_storage_keys(os.environ['azure_resource_group_name'],
-                                                                                  user_storage_account_name)[0]
+                user_storage_account_key = meta_lib.AzureMeta().list_storage_keys(
+                    os.environ['azure_resource_group_name'], user_storage_account_name)[0]
             if shared_storage_account_tag == storage_account.tags["Name"]:
                 shared_storage_account_name = storage_account.name
-                shared_storage_account_key = meta_lib.AzureMeta().list_storage_keys(os.environ['azure_resource_group_name'],
-                                                                                    shared_storage_account_name)[0]
+                shared_storage_account_key = meta_lib.AzureMeta().list_storage_keys(
+                    os.environ['azure_resource_group_name'], shared_storage_account_name)[0]
         if os.environ['azure_datalake_enable'] == 'false':
             put(templates_dir + 'core-site-storage.xml', '/tmp/core-site.xml')
         else:
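Note: the storage-account tags are renamed from *-storage to *-bucket and are now built from the lower-cased project and endpoint names. Illustrative composition with example values; observe that the new code lower-cases but no longer replaces underscores in the project name:

    service_base_name = 'dlab'
    project_name = 'Project_One'
    endpoint_name = 'Local'

    user_tag = "{}-{}-{}-bucket".format(service_base_name, project_name.lower(),
                                        endpoint_name.lower())
    shared_tag = "{}-{}-shared-bucket".format(service_base_name, endpoint_name.lower())
    # user_tag   -> 'dlab-project_one-local-bucket'
    # shared_tag -> 'dlab-local-shared-bucket'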
diff --git a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
index 02c5f51..b1d0acb 100644
--- a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
@@ -39,6 +39,7 @@
 import dlab.common_lib
 import backoff
 import ast
+import random
 
 
 class GCPActions:
@@ -188,12 +189,11 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def add_bucket_label(self, bucket_name):
+    def add_bucket_labels(self, bucket_name, tags):
         try:
             bucket = self.storage_client.get_bucket(bucket_name)
-
             labels = bucket.labels
-            labels['name'] = '{}'.format(bucket_name)
+            labels.update(tags)
             bucket.labels = labels
             bucket.patch()
             print('Updated labels on {}.'.format(bucket_name))
@@ -282,7 +282,7 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def create_instance(self, instance_name, cluster_name, region, zone, vpc_name, subnet_name, instance_size,
+    def create_instance(self, instance_name, service_base_name, cluster_name, region, zone, vpc_name, subnet_name, instance_size,
                         ssh_key_path,
                         initial_user, image_name, secondary_image_name, service_account_name, instance_class,
                         network_tag, labels, static_ip='',
@@ -290,9 +290,13 @@
                         gpu_accelerator_type='None'):
         key = RSA.importKey(open(ssh_key_path, 'rb').read())
         ssh_key = key.publickey().exportKey("OpenSSH")
-        service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_account_name,
-                                                                       self.project)
+        unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
         access_configs = ''
+        if instance_class == 'edge':
+            ip_forward = True
+        else:
+            ip_forward = False
         if instance_class == 'ssn' or instance_class == 'edge':
             access_configs = [{
                 "type": "ONE_TO_ONE_NAT",
@@ -374,6 +378,7 @@
             "name": instance_name,
             "machineType": "zones/{}/machineTypes/{}".format(zone, instance_size),
             "labels": labels,
+            "canIpForward": ip_forward,
             "networkInterfaces": [
                 {
                     "network": "global/networks/{}".format(vpc_name),
@@ -508,16 +513,17 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def remove_service_account(self, service_account_name):
-        service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_account_name, self.project)
+    def remove_service_account(self, service_account_name, service_base_name):
+        unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
         request = self.service_iam.projects().serviceAccounts().delete(
             name='projects/{}/serviceAccounts/{}'.format(self.project, service_account_email))
         try:
             result = request.execute()
-            service_account_removed = meta_lib.GCPMeta().get_service_account(service_account_name)
+            service_account_removed = meta_lib.GCPMeta().get_service_account(service_account_name, service_base_name)
             while service_account_removed:
                 time.sleep(5)
-                service_account_removed = meta_lib.GCPMeta().get_service_account(service_account_name)
+                service_account_removed = meta_lib.GCPMeta().get_service_account(service_account_name, service_base_name)
             time.sleep(30)
             print('Service account {} removed.'.format(service_account_name))
             return result
@@ -530,16 +536,18 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def create_service_account(self, service_account_name):
-        params = {"accountId": service_account_name, "serviceAccount": {"displayName": service_account_name}}
+    def create_service_account(self, service_account_name, service_base_name, unique_index):
+        service_account_id = service_base_name + '-' + unique_index
+        print("Creating service account with accountID:" + service_account_id)
+        params = {"accountId": service_account_id, "serviceAccount": {"displayName": service_account_name}}
         request = self.service_iam.projects().serviceAccounts().create(name='projects/{}'.format(self.project),
                                                                        body=params)
         try:
             result = request.execute()
-            service_account_created = meta_lib.GCPMeta().get_service_account(service_account_name)
+            service_account_created = meta_lib.GCPMeta().get_service_account(service_account_name, service_base_name)
             while not service_account_created:
                 time.sleep(5)
-                service_account_created = meta_lib.GCPMeta().get_service_account(service_account_name)
+                service_account_created = meta_lib.GCPMeta().get_service_account(service_account_name, service_base_name)
             time.sleep(30)
             print('Service account {} created.'.format(service_account_name))
             return result
@@ -552,10 +560,13 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def set_role_to_service_account(self, service_account_name, role_name, role_type='custom'):
+    def set_role_to_service_account(self, service_account_name, role_name, service_base_name, role_type='custom',
+                                    num=0):
+        num += 1
         request = GCPActions().service_resource.projects().getIamPolicy(resource=self.project, body={})
         project_policy = request.execute()
-        service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_account_name, self.project)
+        unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
         params = {
             "role": "projects/{}/roles/{}".format(self.project, role_name.replace('-', '_')),
             "members": [
@@ -574,6 +585,10 @@
         try:
             return request.execute()
         except Exception as err:
+            if "There were concurrent policy changes. " \
+               "Please retry the whole read-modify-write with exponential backoff." in str(err) and num <= 10:
+                time.sleep(random.randint(5, 20))
+                return self.set_role_to_service_account(service_account_name, role_name, service_base_name,
+                                                        role_type, num)
             logging.info(
                 "Unable to set Service account policy: " + str(err) + "\n Traceback: " + traceback.print_exc(
                     file=sys.stdout))
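Note: the added retry handles GCP's concurrent-policy-change error with a bounded number of attempts and a random 5-20 s sleep. The same pattern as a standalone helper (a generic sketch, not project code; the error-string match mirrors the hunk above):

    import random
    import time

    def retry_on_policy_conflict(fn, attempts=10):
        for attempt in range(attempts):
            try:
                return fn()
            except Exception as err:
                if 'concurrent policy changes' not in str(err) or attempt == attempts - 1:
                    raise
                time.sleep(random.randint(5, 20))  # crude jitter, as in the hunk above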
@@ -687,8 +702,9 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def set_service_account_to_instance(self, service_account_name, instance_name):
-        service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_account_name, self.project)
+    def set_service_account_to_instance(self, service_account_name, instance_name, service_base_name):
+        unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
         params = {
             "email": service_account_email
         }
@@ -738,12 +754,14 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def create_image_from_instance_disks(self, primary_image_name, secondary_image_name, instance_name, zone, lables):
+    def create_image_from_instance_disks(self, primary_image_name, secondary_image_name, instance_name, zone, labels):
         primary_disk_name = "projects/{0}/zones/{1}/disks/{2}".format(self.project, zone, instance_name)
         secondary_disk_name = "projects/{0}/zones/{1}/disks/{2}-secondary".format(self.project, zone, instance_name)
-        primary_params = {"name": primary_image_name, "sourceDisk": primary_disk_name, "labels": lables}
+        labels.update({"name": primary_image_name})
+        primary_params = {"name": primary_image_name, "sourceDisk": primary_disk_name, "labels": labels}
         primary_request = self.service.images().insert(project=self.project, body=primary_params)
-        secondary_params = {"name": secondary_image_name, "sourceDisk": secondary_disk_name, "labels": lables}
+        labels.update({"name": secondary_image_name})
+        secondary_params = {"name": secondary_image_name, "sourceDisk": secondary_disk_name, "labels": labels}
         secondary_request = self.service.images().insert(project=self.project, body=secondary_params)
         id_list=[]
         try:
@@ -814,9 +832,11 @@
         except exceptions.NotFound:
             return False
 
-    def set_bucket_owner(self, bucket_name, service_account):
+    def set_bucket_owner(self, bucket_name, service_account_name, service_base_name):
         try:
-            service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_account, self.project)
+            unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
+            service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index,
+                                                                                  self.project)
             bucket = self.storage_client.get_bucket(bucket_name)
             # setting bucket owner
             acl = bucket.acl
@@ -1282,9 +1302,9 @@
         try:
             templates_dir = '/root/templates/'
             sudo('mkdir -p {}'.format(jars_dir))
-            sudo('wget https://storage.googleapis.com/hadoop-lib/gcs/{0} -O {1}{0}'
-                 .format('gcs-connector-latest-hadoop2.jar', jars_dir))
-            sudo('wget http://central.maven.org/maven2/org/apache/hadoop/hadoop-yarn-server-web-proxy/2.7.4/{0} -O {1}{0}'
+            sudo('wget https://storage.googleapis.com/hadoop-lib/gcs/gcs-connector-hadoop2-{0}.jar -O {1}'
+                 'gcs-connector-hadoop2-{0}.jar'.format(os.environ['notebook_gcs_connector_version'], jars_dir))
+            sudo('wget https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-yarn-server-web-proxy/2.7.4/{0} -O {1}{0}'
                  .format('hadoop-yarn-server-web-proxy-2.7.4.jar', jars_dir))
             put(templates_dir + 'core-site.xml', '/tmp/core-site.xml')
             sudo('sed -i "s|GCP_PROJECT_ID|{}|g" /tmp/core-site.xml'.format(os.environ['gcp_project_id']))
@@ -1388,6 +1408,7 @@
 
 def remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name):
     try:
+        computational_name = os.environ['computational_name'].replace('_', '-').lower()
         private = meta_lib.get_instance_private_ip_address(cluster_name, notebook_name)
         env.hosts = "{}".format(private)
         env.user = "{}".format(os_user)
@@ -1436,7 +1457,7 @@
             sudo('sleep 5')
             sudo('rm -rf /home/{}/.ensure_dir/dataengine_{}_interpreter_ensured'.format(os_user, cluster_name))
         if exists('/home/{}/.ensure_dir/rstudio_dataengine_ensured'.format(os_user)):
-            dlab.fab.remove_rstudio_dataengines_kernel(os.environ['computational_name'], os_user)
+            dlab.fab.remove_rstudio_dataengines_kernel(computational_name, os_user)
         sudo('rm -rf  /opt/' + cluster_name + '/')
         print("Notebook's {} kernels were removed".format(env.hosts))
     except Exception as err:
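Note: remove_dataengine_kernels() now normalizes the computational name once up front. Illustration of the transform with an example value:

    computational_name = 'My_Cluster'.replace('_', '-').lower()  # -> 'my-cluster'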
diff --git a/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py b/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
index 5e7a96b..cc16028 100644
--- a/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
+++ b/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
@@ -172,7 +172,8 @@
             traceback.print_exc(file=sys.stdout)
 
     def get_instance(self, instance_name):
-        request = self.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
+        meta = GCPMeta()
+        request = meta.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
                                                instance=instance_name)
         try:
             return request.execute()
@@ -183,8 +184,8 @@
                 raise err
         except Exception as err:
             logging.info(
-                "Unable to get Firewall: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-            append_result(str({"error": "Unable to get Firewall",
+                "Unable to get instance: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+            append_result(str({"error": "Unable to get instance",
                                "error_message": str(err) + "\n Traceback: " + traceback.print_exc(
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
@@ -210,7 +211,7 @@
 
     def get_instance_public_ip_by_name(self, instance_name):
         try:
-            result = GCPMeta().get_instance(instance_name)
+            result = self.get_instance(instance_name)
             if result:
                 for i in result.get('networkInterfaces'):
                     for j in i.get('accessConfigs'):
@@ -225,8 +226,56 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def get_service_account(self, service_account_name):
-        service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_account_name, self.project)
+    def get_index_by_service_account_name(self, service_account_name):
+        try:
+            result = self.service_iam.projects().serviceAccounts().list(
+                name='projects/{}'.format(self.project)).execute()
+            full_list_of_service_accounts = []
+            response = ''
+            if result:
+                for account in result['accounts']:
+                    full_list_of_service_accounts.append(account)
+                if 'nextPageToken' in result:
+                    next_page = True
+                    page_token = result['nextPageToken']
+                else:
+                    next_page = False
+                while next_page:
+                    result2 = self.service_iam.projects().serviceAccounts().list(
+                        name='projects/{}'.format(self.project),
+                        pageToken=page_token).execute()
+                    if result2:
+                        for account in result2['accounts']:
+                            full_list_of_service_accounts.append(account)
+                        if 'nextPageToken' in result2:
+                            page_token = result2['nextPageToken']
+                        else:
+                            next_page = False
+                    else:
+                        next_page = False
+                for service_account in full_list_of_service_accounts:
+                    if service_account['displayName'] == service_account_name:
+                        service_account_email = service_account['email']
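+                        # The unique index is the last 5 characters of the email local part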
+                        response = service_account_email[:service_account_email.find('@')][-5:]
+                return response
+            else:
+                print("No service accounts list received.")
+                return response
+
+        except Exception as err:
+            logging.info(
+                "Unable to get index from service account email: " + str(err) + "\n Traceback: " + traceback.print_exc(
+                    file=sys.stdout))
+            append_result(str({"error": "Unable to get index from service account email",
+                               "error_message": str(err) + "\n Traceback: " + traceback.print_exc(
+                                   file=sys.stdout)}))
+            traceback.print_exc(file=sys.stdout)
+
+    def get_service_account(self, service_account_name, service_base_name):
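+        # Account emails may carry a '-<unique_index>' suffix; fall back to the plain base name when none exists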
+        unique_index = self.get_index_by_service_account_name(service_account_name)
+        if unique_index == '':
+            service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_base_name, self.project)
+        else:
+            service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
         request = self.service_iam.projects().serviceAccounts().get(
             name='projects/{}/serviceAccounts/{}'.format(self.project, service_account_email))
         try:
@@ -295,7 +344,7 @@
 
     def get_private_ip_address(self, instance_name):
         try:
-            result = GCPMeta().get_instance(instance_name)
+            result = self.get_instance(instance_name)
             for i in result['networkInterfaces']:
                 return i['networkIP']
         except Exception as err:
@@ -650,6 +699,17 @@
             traceback.print_exc(file=sys.stdout)
             return ''
 
+    def dataproc_waiter(self, labels):
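+        # Poll every 60 seconds (by recursing) until no Dataproc cluster for this notebook
+        # is being created or terminated; 'labels' is accepted but not used in the check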
+        if os.path.exists(
+                '/response/.emr_creating_' + os.environ['exploratory_name']) or self.get_not_configured_dataproc(
+                os.environ['notebook_instance_name']):
+            with hide('stderr', 'running', 'warnings'):
+                local("echo 'Some Dataproc cluster is still being created/terminated, waiting..'")
+            time.sleep(60)
+            self.dataproc_waiter(labels)
+        else:
+            return True
+
     def get_dataproc_jobs(self):
         jobs = []
         try:
@@ -679,10 +739,10 @@
         try:
             private_list_ip = []
             if conf_type == 'edge_node' or conf_type == 'exploratory':
-                private_list_ip.append(GCPMeta().get_private_ip_address(
+                private_list_ip.append(self.get_private_ip_address(
                 instance_id))
             elif conf_type == 'computational_resource':
-                instance_list = GCPMeta().get_list_instances_by_label(
+                instance_list = self.get_list_instances_by_label(
                     os.environ['gcp_zone'], instance_id)
                 for instance in instance_list.get('items'):
                     private_list_ip.append(instance.get('networkInterfaces')[0].get('networkIP'))
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
index e6a6488..c70e9a9 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
@@ -28,6 +28,49 @@
 import time
 
 
+def manage_pkg(command, environment, requisites):
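+    # Wait (up to 60 checks, 10s apart) while another apt process holds the lock, then run
+    # 'apt-get <command> <requisites>' remotely or locally, repairing dpkg state between attempts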
+    try:
+        attempt = 0
+        installed = False
+        while not installed:
+            print('Pkg installation attempt: {}'.format(attempt))
+            if attempt > 60:
+                print("Notebook is broken please recreate it.")
+                sys.exit(1)
+            else:
+                try:
+                    allow = False
+                    counter = 0
+                    while not allow:
+                        if counter > 60:
+                            print("Notebook is broken please recreate it.")
+                            sys.exit(1)
+                        else:
+                            print('Checking whether the package manager is busy...')
+                            if environment == 'remote':
+                                if sudo('pgrep "^apt" -a && echo "busy" || echo "ready"') == 'busy':
+                                    counter += 1
+                                    time.sleep(10)
+                                else:
+                                    allow = True
+                                    sudo('apt-get {0} {1}'.format(command, requisites))
+                            elif environment == 'local':
+                                if local('sudo pgrep "^apt" -a && echo "busy" || echo "ready"', capture=True) == 'busy':
+                                    counter += 1
+                                    time.sleep(10)
+                                else:
+                                    allow = True
+                                    local('sudo apt-get {0} {1}'.format(command, requisites), capture=True)
+                            else:
+                                print('Wrong environment')
+                    installed = True
+                except:
+                    print("Will try to install with nex attempt.")
+                    sudo('dpkg --configure -a')
+                    attempt += 1
+    except:
+        sys.exit(1)
+
+
 def ensure_pkg(user, requisites='linux-headers-generic python-pip python-dev '
                                 'groff gcc vim less git wget sysv-rc-conf '
                                 'libssl-dev unattended-upgrades nmap '
@@ -45,15 +88,17 @@
                         print("Updating repositories "
                                 "and installing requested tools: {}".format(requisites))
                         print("Attempt number " + str(count) + " to install requested tools. Max 60 tries.")
-                        sudo('apt-get update')
-                        sudo('apt-get -y install ' + requisites)
+                        manage_pkg('update', 'remote', '')
+                        manage_pkg('-y install', 'remote', requisites)
                         sudo('unattended-upgrades -v')
+                        sudo(
+                            'sed -i \'s|APT::Periodic::Unattended-Upgrade "1"|APT::Periodic::Unattended-Upgrade "0"|\' /etc/apt/apt.conf.d/20auto-upgrades')
                         sudo('export LC_ALL=C')
                         sudo('touch /home/{}/.ensure_dir/pkg_upgraded'.format(user))
                         sudo('systemctl enable haveged')
                         sudo('systemctl start haveged')
                         if os.environ['conf_cloud_provider'] == 'aws':
-                            sudo('apt-get -y install --install-recommends linux-aws-hwe')
+                            manage_pkg('-y install --install-recommends', 'remote', 'linux-aws-hwe')
                         check = True
                     except:
                         count += 1
@@ -61,6 +106,7 @@
     except:
         sys.exit(1)
 
+
 def renew_gpg_key():
     try:
         sudo('mv /etc/apt/trusted.gpg /etc/apt/trusted.bkp')
@@ -73,7 +119,7 @@
     if not exists('/tmp/pkg_china_ensured'):
         put('/root/files/sources.list', '/tmp/sources.list')
         sudo('mv /tmp/sources.list /etc/apt/sources.list')
-        sudo('apt-get update')
+        manage_pkg('update', 'remote', '')
         sudo('touch /tmp/pkg_china_ensured')
 
 
@@ -91,7 +137,7 @@
     try:
         if not exists('/home/{}/.ensure_dir/ntpd_ensured'.format(user)):
             sudo('timedatectl set-ntp no')
-            sudo('apt-get -y install ntp ntpdate')
+            manage_pkg('-y install', 'remote', 'ntp ntpdate')
             sudo('echo "tinker panic 0" >> /etc/ntp.conf')
             if os.environ['conf_resource'] != 'ssn' and os.environ['conf_resource'] != 'edge':
                 sudo('echo "server {} prefer iburst" >> /etc/ntp.conf'.format(edge_private_ip))
@@ -100,3 +146,24 @@
             sudo('touch /home/{}/.ensure_dir/ntpd_ensured'.format(user))
     except:
         sys.exit(1)
+
+
+def ensure_java(user):
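+    # Install OpenJDK 8 once, guarded by the java_ensured marker file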
+    try:
+        if not exists('/home/{}/.ensure_dir/java_ensured'.format(user)):
+            manage_pkg('-y install', 'remote', 'openjdk-8-jdk')
+            sudo('touch /home/{}/.ensure_dir/java_ensured'.format(user))
+    except:
+        sys.exit(1)
+
+
+def ensure_step(user):
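+    # Install the smallstep CLI from the pinned v0.13.3 Debian package (used for step-ca certificates)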
+    try:
+        if not exists('/home/{}/.ensure_dir/step_ensured'.format(user)):
+            manage_pkg('-y install', 'remote', 'wget')
+            sudo('wget https://github.com/smallstep/cli/releases/download/v0.13.3/step-cli_0.13.3_amd64.deb '
+                 '-O /tmp/step-cli_0.13.3_amd64.deb')
+            sudo('dpkg -i /tmp/step-cli_0.13.3_amd64.deb')
+            sudo('touch /home/{}/.ensure_dir/step_ensured'.format(user))
+    except:
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
index 31f3095..582d58e 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
@@ -25,12 +25,13 @@
 import sys
 from fabric.api import *
 from fabric.contrib.files import exists
+from dlab.common_lib import manage_pkg
 
 
 def configure_http_proxy_server(config):
     try:
         if not exists('/tmp/http_proxy_ensured'):
-            sudo('apt-get -y install squid')
+            manage_pkg('-y install', 'remote', 'squid')
             template_file = config['template_file']
             proxy_subnet = config['exploratory_subnet']
             put(template_file, '/tmp/squid.conf')
@@ -58,39 +59,127 @@
         sys.exit(1)
 
 
-def install_nginx_ldap(edge_ip, nginx_version, ldap_ip, ldap_dn, ldap_ou, ldap_service_pass, ldap_service_username):
+def install_nginx_lua(edge_ip, nginx_version, keycloak_auth_server_url, keycloak_realm_name, keycloak_client_id,
+                      keycloak_client_secret, user, hostname, step_cert_sans):
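+    # Build nginx from source with LuaJIT and the Lua module so the proxy can authenticate
+    # against Keycloak via OpenID Connect; TLS material comes from step-ca when
+    # conf_stepcerts_enabled is 'true', otherwise a self-signed certificate is generated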
     try:
         if not os.path.exists('/tmp/nginx_installed'):
-            sudo('apt-get install -y wget')
-            sudo('apt-get -y install gcc build-essential make zlib1g-dev libpcre++-dev libssl-dev git libldap2-dev')
-            sudo('mkdir -p /tmp/nginx_auth_ldap')
-            with cd('/tmp/nginx_auth_ldap'):
-                sudo('git clone https://github.com/kvspb/nginx-auth-ldap.git')
+            manage_pkg('-y install', 'remote', 'wget')
+            manage_pkg('-y install', 'remote', 'gcc build-essential make automake zlib1g-dev libpcre++-dev libssl-dev git libldap2-dev libc6-dev libgd-dev libgeoip-dev libpcre3-dev apt-utils autoconf liblmdb-dev libtool libxml2-dev libyajl-dev pkgconf liblua5.1-0 liblua5.1-0-dev libreadline-dev libreadline6-dev libtinfo-dev libtool-bin lua5.1 zip readline-doc')
+            if os.environ['conf_stepcerts_enabled'] == 'true':
+                sudo('mkdir -p /home/{0}/keys'.format(user))
+                sudo('''bash -c 'echo "{0}" | base64 --decode > /etc/ssl/certs/root_ca.crt' '''.format(
+                     os.environ['conf_stepcerts_root_ca']))
+                fingerprint = sudo('step certificate fingerprint /etc/ssl/certs/root_ca.crt')
+                sudo('step ca bootstrap --fingerprint {0} --ca-url "{1}"'.format(fingerprint,
+                                                                                 os.environ['conf_stepcerts_ca_url']))
+                sudo('echo "{0}" > /home/{1}/keys/provisioner_password'.format(
+                     os.environ['conf_stepcerts_kid_password'], user))
+                sans = "--san localhost --san 127.0.0.1 {0}".format(step_cert_sans)
+                cn = edge_ip
+                sudo('step ca token {3} --kid {0} --ca-url "{1}" --root /etc/ssl/certs/root_ca.crt '
+                     '--password-file /home/{2}/keys/provisioner_password {4} --output-file /tmp/step_token'.format(
+                      os.environ['conf_stepcerts_kid'], os.environ['conf_stepcerts_ca_url'], user, cn, sans))
+                token = sudo('cat /tmp/step_token')
+                sudo('step ca certificate "{0}" /etc/ssl/certs/dlab.crt /etc/ssl/certs/dlab.key '
+                     '--token "{1}" --kty=RSA --size 2048 --provisioner {2} '.format(cn, token,
+                                                                                     os.environ['conf_stepcerts_kid']))
+                sudo('touch /var/log/renew_certificates.log')
+                put('/root/templates/manage_step_certs.sh', '/usr/local/bin/manage_step_certs.sh', use_sudo=True)
+                sudo('chmod +x /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_ROOT_CERT_PATH|/etc/ssl/certs/root_ca.crt|g" '
+                     '/usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_CERT_PATH|/etc/ssl/certs/dlab.crt|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_KEY_PATH|/etc/ssl/certs/dlab.key|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_CA_URL|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(
+                    os.environ['conf_stepcerts_ca_url']))
+                sudo('sed -i "s|RESOURCE_TYPE|edge|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|SANS|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(sans))
+                sudo('sed -i "s|CN|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(cn))
+                sudo('sed -i "s|KID|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(
+                    os.environ['conf_stepcerts_kid']))
+                sudo('sed -i "s|STEP_PROVISIONER_PASSWORD_PATH|/home/{0}/keys/provisioner_password|g" '
+                     '/usr/local/bin/manage_step_certs.sh'.format(user))
+                sudo('bash -c \'echo "0 * * * * root /usr/local/bin/manage_step_certs.sh >> '
+                     '/var/log/renew_certificates.log 2>&1" >> /etc/crontab \'')
+                put('/root/templates/step-cert-manager.service', '/etc/systemd/system/step-cert-manager.service',
+                    use_sudo=True)
+                sudo('systemctl daemon-reload')
+                sudo('systemctl enable step-cert-manager.service')
+            else:
+                sudo('openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout /etc/ssl/certs/dlab.key \
+                     -out /etc/ssl/certs/dlab.crt -subj "/C=US/ST=US/L=US/O=dlab/CN={}"'.format(hostname))
+            sudo('mkdir -p /tmp/lua')
             sudo('mkdir -p /tmp/src')
             with cd('/tmp/src/'):
                 sudo('wget http://nginx.org/download/nginx-{}.tar.gz'.format(nginx_version))
                 sudo('tar -xzf nginx-{}.tar.gz'.format(nginx_version))
+
+                sudo('wget https://github.com/openresty/lua-nginx-module/archive/v0.10.15.tar.gz')
+                sudo('tar -xzf v0.10.15.tar.gz')
+
+                sudo('wget https://github.com/simplresty/ngx_devel_kit/archive/v0.3.1.tar.gz')
+                sudo('tar -xzf v0.3.1.tar.gz')
+
+                sudo('wget http://luajit.org/download/LuaJIT-2.0.5.tar.gz')
+                sudo('tar -xzf LuaJIT-2.0.5.tar.gz')
+
+                sudo('wget http://keplerproject.github.io/luarocks/releases/luarocks-2.2.2.tar.gz')
+                sudo('tar -xzf luarocks-2.2.2.tar.gz')
+
                 sudo('ln -sf nginx-{} nginx'.format(nginx_version))
-            with cd('/tmp/src/nginx/'):
+
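+            # Build and install LuaJIT first; nginx links against it via LUAJIT_LIB/LUAJIT_INC below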
+            with cd('/tmp/src/LuaJIT-2.0.5/'):
+                sudo('make')
+                sudo('make install')
+
+            with cd('/tmp/src/nginx/'), shell_env(LUAJIT_LIB='/usr/local/lib/', LUAJIT_INC='/usr/local/include/luajit-2.0'):
                 sudo('./configure --user=nginx --group=nginx --prefix=/etc/nginx --sbin-path=/usr/sbin/nginx \
                               --conf-path=/etc/nginx/nginx.conf --pid-path=/run/nginx.pid --lock-path=/run/lock/subsys/nginx \
                               --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log \
                               --with-http_gzip_static_module --with-http_stub_status_module --with-http_ssl_module --with-pcre \
-                              --with-http_realip_module --with-file-aio --with-ipv6 --with-http_v2_module --with-debug \
+                              --with-http_realip_module --with-file-aio --with-ipv6 --with-http_v2_module --with-ld-opt="-Wl,-rpath,$LUAJIT_LIB"  \
                               --without-http_scgi_module --without-http_uwsgi_module --without-http_fastcgi_module --with-http_sub_module \
-                              --add-module=/tmp/nginx_auth_ldap/nginx-auth-ldap/')
+                              --add-dynamic-module=/tmp/src/ngx_devel_kit-0.3.1 --add-dynamic-module=/tmp/src/lua-nginx-module-0.10.15')
                 sudo('make')
                 sudo('make install')
+
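+            # Install the Lua rocks required by the OIDC proxy (lua-resty jwt/session/http/openidc and friends)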
+            with cd('/tmp/src/luarocks-2.2.2/'):
+                sudo('./configure')
+                sudo('make build')
+                sudo('make install')
+                sudo('wget https://luarocks.org/manifests/cdbattags/lua-resty-jwt-0.2.0-0.src.rock')
+                sudo('luarocks build lua-resty-jwt-0.2.0-0.src.rock')
+                sudo('wget https://luarocks.org/manifests/bungle/lua-resty-session-2.26-1.src.rock')
+                sudo('luarocks build lua-resty-session-2.26-1.src.rock')
+                sudo('wget https://luarocks.org/manifests/pintsized/lua-resty-http-0.15-0.src.rock')
+                sudo('luarocks build lua-resty-http-0.15-0.src.rock')
+                sudo('wget https://luarocks.org/manifests/hanszandbelt/lua-resty-openidc-1.7.2-1.src.rock')
+                sudo('luarocks build lua-resty-openidc-1.7.2-1.src.rock')
+                sudo('wget https://luarocks.org/manifests/starius/luacrypto-0.3.2-2.src.rock')
+                sudo('luarocks build luacrypto-0.3.2-2.src.rock')
+                sudo('wget https://luarocks.org/manifests/openresty/lua-cjson-2.1.0.6-1.src.rock')
+                sudo('luarocks build lua-cjson-2.1.0.6-1.src.rock')
+                sudo('wget https://luarocks.org/manifests/avlubimov/lua-resty-core-0.1.17-4.src.rock')
+                sudo('luarocks build lua-resty-core-0.1.17-4.src.rock')
+                sudo('wget https://luarocks.org/manifests/hjpotter92/random-1.1-0.rockspec')
+                sudo('luarocks install random-1.1-0.rockspec')
+                sudo('wget https://luarocks.org/manifests/rsander/lua-resty-string-0.09-0.rockspec')
+                sudo('luarocks install lua-resty-string-0.09-0.rockspec')
+
             sudo('useradd -r nginx')
             sudo('rm -f /etc/nginx/nginx.conf')
             sudo('mkdir -p /opt/dlab/templates')
             put('/root/templates', '/opt/dlab', use_sudo=True)
-            sudo('sed -i \'s/LDAP_IP/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_ip))
-            sudo('sed -i \'s/LDAP_DN/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_dn))
-            sudo('sed -i \'s/LDAP_OU/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_ou))
-            sudo('sed -i \'s/LDAP_SERVICE_PASSWORD/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_service_pass))
-            sudo('sed -i \'s/LDAP_SERVICE_USERNAME/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_service_username))
             sudo('sed -i \'s/EDGE_IP/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(edge_ip))
+            sudo('sed -i \'s|KEYCLOAK_AUTH_URL|{}|g\' /opt/dlab/templates/conf.d/proxy.conf'.format(
+                keycloak_auth_server_url))
+            sudo('sed -i \'s/KEYCLOAK_REALM_NAME/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(
+                keycloak_realm_name))
+            sudo('sed -i \'s/KEYCLOAK_CLIENT_ID/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(
+                keycloak_client_id))
+            sudo('sed -i \'s/KEYCLOAK_CLIENT_SECRET/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(
+                keycloak_client_secret))
+
             sudo('cp /opt/dlab/templates/nginx.conf /etc/nginx/')
             sudo('mkdir /etc/nginx/conf.d')
             sudo('cp /opt/dlab/templates/conf.d/proxy.conf /etc/nginx/conf.d/')
@@ -103,4 +192,4 @@
             sudo('touch /tmp/nginx_installed')
     except Exception as err:
         print("Failed install nginx with ldap: " + str(err))
-        sys.exit(1)
+        sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/notebook_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/notebook_lib.py
index 35a46f8..d1fa44a 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/notebook_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/notebook_lib.py
@@ -64,7 +64,7 @@
             sudo('\cp -f /tmp/r_template.json {}/ir/kernel.json'.format(kernels_dir))
             sudo('ln -s /opt/spark/ /usr/local/spark')
             try:
-                sudo('cd /usr/local/spark/R/lib/SparkR; R -e "install.packages(\'roxygen2\',repos=\'http://cran.us.r-project.org\')" R -e "devtools::check(\'.\')"')
+                sudo('cd /usr/local/spark/R/lib/SparkR; R -e "install.packages(\'roxygen2\',repos=\'https://cloud.r-project.org\')"; R -e "devtools::check(\'.\')"')
             except:
                 pass
             sudo('cd /usr/local/spark/R/lib/SparkR; R -e "devtools::install(\'.\')"')
@@ -86,12 +86,12 @@
             if region == 'cn-north-1':
                 r_repository = r_mirror
             else:
-                r_repository = 'http://cran.us.r-project.org'
+                r_repository = 'https://cloud.r-project.org'
             add_marruter_key()
             sudo('apt update')
-            sudo('apt-get install -y libcurl4-openssl-dev libssl-dev libreadline-dev')
-            sudo('apt-get install -y cmake')
-            sudo('apt-get install -y r-base r-base-dev')
+            manage_pkg('-y install', 'remote', 'libcurl4-openssl-dev libssl-dev libreadline-dev')
+            manage_pkg('-y install', 'remote', 'cmake')
+            manage_pkg('-y install', 'remote', 'r-base r-base-dev')
             sudo('R CMD javareconf')
             sudo('cd /root; git clone https://github.com/zeromq/zeromq4-x.git; cd zeromq4-x/; mkdir build; cd build; cmake ..; make install; ldconfig')
             for i in r_libs:
@@ -112,16 +112,16 @@
 def install_rstudio(os_user, local_spark_path, rstudio_pass, rstudio_version):
     if not exists('/home/' + os_user + '/.ensure_dir/rstudio_ensured'):
         try:
-            sudo('apt-get install -y r-base')
-            sudo('apt-get install -y gdebi-core')
-            sudo('wget https://download2.rstudio.org/rstudio-server-{}-amd64.deb'.format(rstudio_version))
+            manage_pkg('-y install', 'remote', 'r-base')
+            manage_pkg('-y install', 'remote', 'gdebi-core')
+            sudo('wget https://download2.rstudio.org/server/trusty/amd64/rstudio-server-{}-amd64.deb'.format(rstudio_version))
             sudo('gdebi -n rstudio-server-{}-amd64.deb'.format(rstudio_version))
             sudo('mkdir -p /mnt/var')
             sudo('chown {0}:{0} /mnt/var'.format(os_user))
-            if os.environ['application'] == 'tensor-rstudio':
-                sudo("sed -i '/ExecStart/s|=|=/bin/bash -c \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; |g' /etc/systemd/system/rstudio-server.service")
-                sudo("sed -i '/ExecStart/s|$|\"|g' /etc/systemd/system/rstudio-server.service")
-                sudo("systemctl daemon-reload")
+            sudo("sed -i '/Type=forking/a \Environment=USER=dlab-user' /etc/systemd/system/rstudio-server.service")
+            sudo("sed -i '/ExecStart/s|=/usr/lib/rstudio-server/bin/rserver|=/bin/bash -c \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; /usr/lib/rstudio-server/bin/rserver --auth-none 1|g' /etc/systemd/system/rstudio-server.service")
+            sudo("sed -i '/ExecStart/s|$|\"|g' /etc/systemd/system/rstudio-server.service")
+            sudo("systemctl daemon-reload")
             sudo('touch /home/{}/.Renviron'.format(os_user))
             sudo('chown {0}:{0} /home/{0}/.Renviron'.format(os_user))
             sudo('''echo 'SPARK_HOME="{0}"' >> /home/{1}/.Renviron'''.format(local_spark_path, os_user))
@@ -151,8 +151,8 @@
     if not exists('/home/' + os_user + '/.ensure_dir/matplot_ensured'):
         try:
             sudo("sudo sed -i~orig -e 's/# deb-src/deb-src/' /etc/apt/sources.list")
-            sudo('sudo apt-get update')
-            sudo('apt-get build-dep -y python-matplotlib')
+            manage_pkg('update', 'remote', '')
+            manage_pkg('-y build-dep', 'remote', 'python-matplotlib')
             sudo('pip2 install matplotlib==2.0.2 --no-cache-dir')
             sudo('pip3 install matplotlib==2.0.2 --no-cache-dir')
             if os.environ['application'] in ('tensor', 'deeplearning'):
@@ -170,11 +170,11 @@
 def ensure_sbt(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/sbt_ensured'):
         try:
-            sudo('apt-get install -y apt-transport-https')
+            manage_pkg('-y install', 'remote', 'apt-transport-https')
             sudo('echo "deb https://dl.bintray.com/sbt/debian /" | sudo tee -a /etc/apt/sources.list.d/sbt.list')
             add_sbt_key()
-            sudo('apt-get update')
-            sudo('apt-get install -y sbt')
+            manage_pkg('update', 'remote', '')
+            manage_pkg('-y install', 'remote', 'sbt')
             sudo('touch /home/' + os_user + '/.ensure_dir/sbt_ensured')
         except:
             sys.exit(1)
@@ -193,8 +193,8 @@
 def ensure_jre_jdk(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/jre_jdk_ensured'):
         try:
-            sudo('apt-get install -y default-jre')
-            sudo('apt-get install -y default-jdk')
+            manage_pkg('-y install', 'remote', 'default-jre')
+            manage_pkg('-y install', 'remote', 'default-jdk')
             sudo('touch /home/' + os_user + '/.ensure_dir/jre_jdk_ensured')
         except:
             sys.exit(1)
@@ -203,7 +203,7 @@
 def ensure_additional_python_libs(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/additional_python_libs_ensured'):
         try:
-            sudo('apt-get install -y libjpeg8-dev zlib1g-dev')
+            manage_pkg('-y install', 'remote', 'libjpeg8-dev zlib1g-dev')
             if os.environ['application'] in ('jupyter', 'zeppelin'):
                 sudo('pip2 install NumPy=={} SciPy pandas Sympy Pillow sklearn --no-cache-dir'.format(os.environ['notebook_numpy_version']))
                 sudo('pip3 install NumPy=={} SciPy pandas Sympy Pillow sklearn --no-cache-dir'.format(os.environ['notebook_numpy_version']))
@@ -231,10 +231,10 @@
     if not exists('/home/' + os_user + '/.ensure_dir/python2_libraries_ensured'):
         try:
             try:
-                sudo('apt-get install -y libssl-dev python-virtualenv')
+                manage_pkg('-y install', 'remote', 'libssl-dev python-virtualenv')
             except:
                 sudo('pip2 install virtualenv --no-cache-dir')
-                sudo('apt-get install -y libssl-dev')
+                manage_pkg('-y install', 'remote', 'libssl-dev')
             try:
                 sudo('pip2 install tornado=={0} ipython ipykernel=={1} --no-cache-dir' \
                      .format(os.environ['notebook_tornado_version'], os.environ['notebook_ipykernel_version']))
@@ -252,8 +252,8 @@
 def ensure_python3_libraries(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/python3_libraries_ensured'):
         try:
-            sudo('apt-get install python3-setuptools')
-            sudo('apt install -y python3-pip')
+            manage_pkg('-y install', 'remote', 'python3-setuptools')
+            manage_pkg('-y install', 'remote', 'python3-pip')
             try:
                 sudo('pip3 install tornado=={0} ipython==7.9.0 ipykernel=={1} --no-cache-dir' \
                      .format(os.environ['notebook_tornado_version'], os.environ['notebook_ipykernel_version']))
@@ -279,10 +279,10 @@
             sudo('update-initramfs -u')
             with settings(warn_only=True):
                 reboot(wait=150)
-            sudo('apt-get -y install dkms')
+            manage_pkg('-y install', 'remote', 'dkms')
             kernel_version = run('uname -r | tr -d "[..0-9-]"')
             if kernel_version == 'azure':
-                sudo('apt-get -y install linux-modules-`uname -r`')
+                manage_pkg('-y install', 'remote', 'linux-modules-`uname -r`')
             else:
                 #legacy support for old kernels
                 sudo('if [[ $(apt-cache search linux-image-`uname -r`) ]]; then apt-get -y install linux-image-`uname -r`; else apt-get -y install linux-modules-`uname -r`; fi;')
@@ -324,13 +324,19 @@
 
 def install_maven(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/maven_ensured'):
-        sudo('apt-get -y install maven')
+        manage_pkg('-y install', 'remote', 'maven')
         sudo('touch /home/' + os_user + '/.ensure_dir/maven_ensured')
 
+
+def install_gcloud(os_user):
+    if not exists('/home/' + os_user + '/.ensure_dir/gcloud_ensured'):
+        sudo('echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list')
+        sudo('curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -')
+        manage_pkg('-y install', 'remote', 'google-cloud-sdk')
+        sudo('touch /home/' + os_user + '/.ensure_dir/gcloud_ensured')
+
 
 def install_livy_dependencies(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/livy_dependencies_ensured'):
-        sudo('apt-get -y install libkrb5-dev')
+        manage_pkg('-y install', 'remote', 'libkrb5-dev')
         sudo('pip2 install cloudpickle requests requests-kerberos flake8 flaky pytest --no-cache-dir')
         sudo('pip3 install cloudpickle requests requests-kerberos flake8 flaky pytest --no-cache-dir')
         sudo('touch /home/' + os_user + '/.ensure_dir/livy_dependencies_ensured')
@@ -338,13 +344,13 @@
 
 def install_maven_emr(os_user):
     if not os.path.exists('/home/' + os_user + '/.ensure_dir/maven_ensured'):
-        local('sudo apt-get -y install maven')
+        manage_pkg('-y install', 'local', 'maven')
         local('touch /home/' + os_user + '/.ensure_dir/maven_ensured')
 
 
 def install_livy_dependencies_emr(os_user):
     if not os.path.exists('/home/' + os_user + '/.ensure_dir/livy_dependencies_ensured'):
-        local('sudo apt-get -y install libkrb5-dev')
+        manage_pkg('-y install', 'local', 'libkrb5-dev')
         local('sudo pip2 install cloudpickle requests requests-kerberos flake8 flaky pytest --no-cache-dir')
         local('sudo pip3 install cloudpickle requests requests-kerberos flake8 flaky pytest --no-cache-dir')
         local('touch /home/' + os_user + '/.ensure_dir/livy_dependencies_ensured')
@@ -353,7 +359,7 @@
 def install_nodejs(os_user):
     if not exists('/home/{}/.ensure_dir/nodejs_ensured'.format(os_user)):
         sudo('curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -')
-        sudo('apt-get install -y nodejs')
+        manage_pkg('-y install', 'remote', 'nodejs')
         sudo('touch /home/{}/.ensure_dir/nodejs_ensured'.format(os_user))
 
 
@@ -362,7 +368,7 @@
     error_parser = "Could not|No matching|Error:|failed|Requires:"
     try:
         print("Updating repositories and installing requested tools: {}".format(requisites))
-        sudo('apt-get update')
+        manage_pkg('update', 'remote', '')
         for os_pkg in requisites:
             sudo('DEBIAN_FRONTEND=noninteractive apt-get -y install {0} 2>&1 | if ! grep -w -E  "({1})" >  /tmp/os_install_{0}.log; then  echo "" > /tmp/os_install_{0}.log;fi'.format(os_pkg, error_parser))
             err = sudo('cat /tmp/os_install_{}.log'.format(os_pkg)).replace('"', "'")
@@ -394,7 +400,7 @@
     try:
         os_pkgs = dict()
         ansi_escape = re.compile(r'\x1b[^m]*m')
-        sudo('apt-get update')
+        manage_pkg('update', 'remote', '')
         apt_raw = sudo("apt list")
         apt_list = ansi_escape.sub('', apt_raw).split("\r\n")
         for pkg in apt_list:
@@ -408,14 +414,12 @@
 def install_caffe2(os_user, caffe2_version, cmake_version):
     if not exists('/home/{}/.ensure_dir/caffe2_ensured'.format(os_user)):
         env.shell = "/bin/bash -l -c -i"
-        sudo('apt-get update')
-        sudo('apt-get install -y --no-install-recommends build-essential cmake git libgoogle-glog-dev libprotobuf-dev'
-             ' protobuf-compiler python-dev python-pip')
+        manage_pkg('update', 'remote', '')
+        manage_pkg('-y install --no-install-recommends', 'remote', 'build-essential cmake git libgoogle-glog-dev libprotobuf-dev protobuf-compiler python-dev python-pip')
         sudo('pip2 install numpy=={} protobuf --no-cache-dir'.format(os.environ['notebook_numpy_version']))
         sudo('pip3 install numpy=={} protobuf --no-cache-dir'.format(os.environ['notebook_numpy_version']))
-        sudo('apt-get install -y --no-install-recommends libgflags-dev')
-        sudo('apt-get install -y --no-install-recommends libgtest-dev libiomp-dev libleveldb-dev liblmdb-dev '
-             'libopencv-dev libopenmpi-dev libsnappy-dev openmpi-bin openmpi-doc python-pydot')
+        manage_pkg('-y install --no-install-recommends', 'remote', 'libgflags-dev')
+        manage_pkg('-y install --no-install-recommends', 'remote', 'libgtest-dev libiomp-dev libleveldb-dev liblmdb-dev libopencv-dev libopenmpi-dev libsnappy-dev openmpi-bin openmpi-doc python-pydot')
         sudo('pip2 install flask graphviz hypothesis jupyter matplotlib==2.0.2 pydot python-nvd3 pyyaml requests scikit-image '
              'scipy setuptools tornado --no-cache-dir')
         sudo('pip3 install flask graphviz hypothesis jupyter matplotlib==2.0.2 pydot python-nvd3 pyyaml requests scikit-image '
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py
index 145b7fb..f4cda59 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py
@@ -30,6 +30,7 @@
 import json
 import traceback
 import sys
+from dlab.common_lib import manage_pkg
 
 
 def ensure_docker_daemon(dlab_path, os_user, region):
@@ -39,9 +40,9 @@
             sudo('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -')
             sudo('add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) \
                   stable"')
-            sudo('apt-get update')
+            manage_pkg('update', 'remote', '')
             sudo('apt-cache policy docker-ce')
-            sudo('apt-get install -y docker-ce={}~ce-0~ubuntu'.format(docker_version))
+            manage_pkg('-y install', 'remote', 'docker-ce={}~ce~3-0~ubuntu'.format(docker_version))
             sudo('usermod -a -G docker ' + os_user)
             sudo('update-rc.d docker defaults')
             sudo('update-rc.d docker enable')
@@ -54,7 +55,7 @@
 def ensure_nginx(dlab_path):
     try:
         if not exists(dlab_path + 'tmp/nginx_ensured'):
-            sudo('apt-get -y install nginx')
+            manage_pkg('-y install', 'remote', 'nginx')
             sudo('service nginx restart')
             sudo('update-rc.d nginx defaults')
             sudo('update-rc.d nginx enable')
@@ -70,9 +71,8 @@
         if not exists(dlab_path + 'tmp/jenkins_ensured'):
             sudo('wget -q -O - https://pkg.jenkins.io/debian/jenkins-ci.org.key | apt-key add -')
             sudo('echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list')
-            sudo('apt-get -y update')
-            sudo('apt-get -y install openjdk-8-jdk')
-            sudo('apt-get -y install jenkins')
+            manage_pkg('-y update', 'remote', '')
+            manage_pkg('-y install', 'remote', 'jenkins')
             sudo('touch ' + dlab_path + 'tmp/jenkins_ensured')
     except Exception as err:
         traceback.print_exc()
@@ -105,10 +105,14 @@
     try:
         random_file_part = id_generator(size=20)
         if not exists("/etc/nginx/conf.d/nginx_proxy.conf"):
+            sudo('useradd -r nginx')
             sudo('rm -f /etc/nginx/conf.d/*')
+            put(config['nginx_template_dir'] + 'ssn_nginx.conf', '/tmp/nginx.conf')
             put(config['nginx_template_dir'] + 'nginx_proxy.conf', '/tmp/nginx_proxy.conf')
             sudo("sed -i 's|SSN_HOSTNAME|" + hostname + "|' /tmp/nginx_proxy.conf")
+            sudo('mv /tmp/nginx.conf ' + dlab_path + 'tmp/')
             sudo('mv /tmp/nginx_proxy.conf ' + dlab_path + 'tmp/')
+            sudo('\cp ' + dlab_path + 'tmp/nginx.conf /etc/nginx/')
             sudo('\cp ' + dlab_path + 'tmp/nginx_proxy.conf /etc/nginx/conf.d/')
             sudo('mkdir -p /etc/nginx/locations')
             sudo('rm -f /etc/nginx/sites-enabled/default')
@@ -144,7 +148,7 @@
 def ensure_supervisor():
     try:
         if not exists(os.environ['ssn_dlab_path'] + 'tmp/superv_ensured'):
-            sudo('apt-get -y install supervisor')
+            manage_pkg('-y install', 'remote', 'supervisor')
             sudo('update-rc.d supervisor defaults')
             sudo('update-rc.d supervisor enable')
             sudo('touch ' + os.environ['ssn_dlab_path'] + 'tmp/superv_ensured')
@@ -159,7 +163,7 @@
         if not exists(os.environ['ssn_dlab_path'] + 'tmp/mongo_ensured'):
             sudo('apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927')
             sudo('ver=`lsb_release -cs`; echo "deb http://repo.mongodb.org/apt/ubuntu $ver/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list; apt-get update')
-            sudo('apt-get -y --allow-unauthenticated install mongodb-org')
+            manage_pkg('-y --allow-unauthenticated install', 'remote', 'mongodb-org')
             sudo('systemctl enable mongod.service')
             sudo('touch ' + os.environ['ssn_dlab_path'] + 'tmp/mongo_ensured')
     except Exception as err:
@@ -176,7 +180,8 @@
              locale, region_info, ldap_login, tenant_id,
              application_id, hostname, data_lake_name, subscription_id,
              validate_permission_scope, dlab_id, usage_date, product,
-             usage_type, usage, cost, resource_id, tags, billing_dataset_name, report_path=''):
+             usage_type, usage, cost, resource_id, tags, billing_dataset_name, keycloak_client_id,
+             keycloak_client_secret, keycloak_auth_server_url, report_path=''):
     try:
         if not exists(os.environ['ssn_dlab_path'] + 'tmp/ss_started'):
             java_path = sudo("update-alternatives --query java | grep 'Value: ' | grep -o '/.*/jre'")
@@ -191,16 +196,16 @@
             sudo('mv /tmp/ssn.yml ' + os.environ['ssn_dlab_path'] + 'conf/')
             put('/root/templates/proxy_location_webapp_template.conf', '/tmp/proxy_location_webapp_template.conf')
             sudo('mv /tmp/proxy_location_webapp_template.conf ' + os.environ['ssn_dlab_path'] + 'tmp/')
-            if cloud_provider == 'gcp':
-                conf_parameter_name = '--spring.config.location='
+            if cloud_provider == 'aws':
+                conf_parameter_name = '--spring.config.location={0}billing_app.yml --conf '.format(dlab_conf_dir)
                 with open('/root/templates/supervisor_svc.conf', 'r') as f:
                     text = f.read()
                 text = text.replace('WEB_CONF', dlab_conf_dir).replace('OS_USR', os_user)\
                     .replace('CONF_PARAMETER_NAME', conf_parameter_name)
                 with open('/root/templates/supervisor_svc.conf', 'w') as f:
                     f.write(text)
-            elif cloud_provider == 'aws' or 'azure':
-                conf_parameter_name = '--conf '
+            elif cloud_provider == 'gcp' or cloud_provider == 'azure':
+                conf_parameter_name = '--spring.config.location='
                 with open('/root/templates/supervisor_svc.conf', 'r') as f:
                     text = f.read()
                 text = text.replace('WEB_CONF', dlab_conf_dir).replace('OS_USR', os_user)\
@@ -229,7 +234,8 @@
                         item['key'], item['value']))
                 sudo('sed -i "s|SERVICE_BASE_NAME|{0}|g" /tmp/yml_tmp/self-service.yml'.format(service_base_name))
                 sudo('sed -i "s|OPERATION_SYSTEM|debian|g" /tmp/yml_tmp/self-service.yml')
-
+                sudo('sed -i "s|<SSN_INSTANCE_SIZE>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(
+                    os.environ['{0}_ssn_instance_size'.format(os.environ['conf_cloud_provider'])]))
                 if cloud_provider == 'azure':
                     sudo('sed -i "s|<LOGIN_USE_LDAP>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(ldap_login))
                     sudo('sed -i "s|<LOGIN_TENANT_ID>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(tenant_id))
@@ -280,7 +286,15 @@
                          '--cost {} ' \
                          '--resource_id {} ' \
                          '--tags {} ' \
-                         '--billing_dataset_name "{}" '.\
+                         '--billing_dataset_name "{}" '\
+                         '--mongo_host localhost ' \
+                         '--mongo_port 27017 ' \
+                         '--service_base_name {} ' \
+                         '--os_user {} ' \
+                         '--keystore_password {} ' \
+                         '--keycloak_client_id {} ' \
+                         '--keycloak_client_secret {} ' \
+                         '--keycloak_auth_server_url {} '.\
                             format(cloud_provider,
                                    service_base_name,
                                    tag_resource_id,
@@ -304,15 +318,36 @@
                                    cost,
                                    resource_id,
                                    tags,
-                                   billing_dataset_name)
+                                   billing_dataset_name,
+                                   service_base_name,
+                                   os_user,
+                                   keystore_passwd,
+                                   keycloak_client_id,
+                                   keycloak_client_secret,
+                                   keycloak_auth_server_url)
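+                # Mongo endpoint and Keycloak credentials are passed through to the billing configuration script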
                 sudo('python /tmp/configure_billing.py {}'.format(params))
             try:
-                sudo('keytool -genkeypair -alias dlab -keyalg RSA -validity 730 -storepass {1} -keypass {1} \
-                     -keystore /home/{0}/keys/dlab.keystore.jks -keysize 2048 -dname "CN=localhost"'.format(os_user, keystore_passwd))
-                sudo('keytool -exportcert -alias dlab -storepass {1} -file /home/{0}/keys/dlab.crt \
-                     -keystore /home/{0}/keys/dlab.keystore.jks'.format(os_user, keystore_passwd))
-                sudo('keytool -importcert -trustcacerts -alias dlab -file /home/{0}/keys/dlab.crt -noprompt \
-                     -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_path))
+                if os.environ['conf_stepcerts_enabled'] == 'true':
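+                    # Wrap the step-issued cert/key into a PKCS12 bundle, import it into the SSN
+                    # JKS keystore, and trust the root CA and SSN cert in the JVM cacerts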
+                    sudo('openssl pkcs12 -export -in /etc/ssl/certs/dlab.crt -inkey /etc/ssl/certs/dlab.key -name ssn '
+                         '-out ssn.p12 -password pass:{0}'.format(keystore_passwd))
+                    sudo('keytool -importkeystore -srckeystore ssn.p12 -srcstoretype PKCS12 -alias ssn -destkeystore '
+                         '/home/{0}/keys/ssn.keystore.jks -deststorepass "{1}" -srcstorepass "{1}"'.format(
+                          os_user, keystore_passwd))
+                    sudo('keytool -keystore /home/{0}/keys/ssn.keystore.jks -alias step-ca -import -file '
+                         '/etc/ssl/certs/root_ca.crt  -deststorepass "{1}" -srcstorepass "{1}" -noprompt'.format(
+                          os_user, keystore_passwd))
+                    sudo('keytool -importcert -trustcacerts -alias step-ca -file /etc/ssl/certs/root_ca.crt '
+                         '-noprompt -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_path))
+                    sudo('keytool -importcert -trustcacerts -alias ssn -file /etc/ssl/certs/dlab.crt -noprompt '
+                         '-storepass changeit -keystore {0}/lib/security/cacerts'.format(java_path))
+                else:
+                    sudo('keytool -genkeypair -alias ssn -keyalg RSA -validity 730 -storepass {1} -keypass {1} \
+                         -keystore /home/{0}/keys/ssn.keystore.jks -keysize 2048 -dname "CN=localhost"'.format(
+                         os_user, keystore_passwd))
+                    sudo('keytool -exportcert -alias ssn -storepass {1} -file /etc/ssl/certs/dlab.crt \
+                         -keystore /home/{0}/keys/ssn.keystore.jks'.format(os_user, keystore_passwd))
+                    sudo('keytool -importcert -trustcacerts -alias ssn -file /etc/ssl/certs/dlab.crt -noprompt \
+                         -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_path))
             except:
                 append_result("Unable to generate cert and copy to java keystore")
                 sys.exit(1)
@@ -330,14 +365,14 @@
     try:
         if not exists('{}tmp/build_dep_ensured'.format(os.environ['ssn_dlab_path'])):
             maven_version = '3.5.4'
-            sudo('apt-get install -y openjdk-8-jdk git wget unzip')
+            manage_pkg('-y install', 'remote', 'openjdk-8-jdk git wget unzip')
             with cd('/opt/'):
                 sudo('wget http://mirrors.sonic.net/apache/maven/maven-{0}/{1}/binaries/apache-maven-{1}-bin.zip'.format(
                     maven_version.split('.')[0], maven_version))
                 sudo('unzip apache-maven-{}-bin.zip'.format(maven_version))
                 sudo('mv apache-maven-{} maven'.format(maven_version))
             sudo('bash -c "curl --silent --location https://deb.nodesource.com/setup_12.x | bash -"')
-            sudo('apt-get install -y nodejs')
+            manage_pkg('-y install', 'remote', 'nodejs')
             sudo('npm config set unsafe-perm=true')
             sudo('touch {}tmp/build_dep_ensured'.format(os.environ['ssn_dlab_path']))
     except Exception as err:
diff --git a/infrastructure-provisioning/src/general/lib/os/fab.py b/infrastructure-provisioning/src/general/lib/os/fab.py
index a65cefa..cd15d42 100644
--- a/infrastructure-provisioning/src/general/lib/os/fab.py
+++ b/infrastructure-provisioning/src/general/lib/os/fab.py
@@ -32,6 +32,7 @@
 import dlab.actions_lib
 import re
 import traceback
+from dlab.common_lib import *
 
 
 def ensure_pip(requisites):
@@ -40,6 +41,7 @@
             sudo('echo PATH=$PATH:/usr/local/bin/:/opt/spark/bin/ >> /etc/profile')
             sudo('echo export PATH >> /etc/profile')
             sudo('pip install -UI pip=={} --no-cache-dir'.format(os.environ['conf_pip_version']))
+            sudo('pip install --upgrade setuptools')
             sudo('pip install -U {} --no-cache-dir'.format(requisites))
             sudo('touch /home/{}/.ensure_dir/pip_path_added'.format(os.environ['conf_os_user']))
     except:
@@ -118,6 +120,12 @@
 def append_result(error, exception=''):
     ts = time.time()
     st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
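+    # Compose the message once so it is both printed immediately and appended to /root/result.json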
+    if exception:
+        error_message = "[Error-{}]: {}. Exception: {}".format(st, error, str(exception))
+        print(error_message)
+    else:
+        error_message = "[Error-{}]: {}.".format(st, error)
+        print(error_message)
     with open('/root/result.json', 'a+') as f:
         text = f.read()
     if len(text) == 0:
@@ -126,10 +134,7 @@
             f.write(res)
     with open("/root/result.json") as f:
         data = json.load(f)
-    if exception:
-        data['error'] = data['error'] + " [Error-" + st + "]:" + error + " Exception: " + str(exception)
-    else:
-        data['error'] = data['error'] + " [Error-" + st + "]:" + error
+    data['error'] = data['error'] + error_message
     with open("/root/result.json", 'w') as f:
         json.dump(data, f)
     print(data)
@@ -146,7 +151,7 @@
 def configure_jupyter(os_user, jupyter_conf_file, templates_dir, jupyter_version, exploratory_name):
     if not exists('/home/' + os_user + '/.ensure_dir/jupyter_ensured'):
         try:
-            sudo('pip2 install notebook=={} --no-cache-dir'.format(jupyter_version))
+            sudo('pip2 install notebook==5.7.8 --no-cache-dir')
             sudo('pip2 install jupyter --no-cache-dir')
             sudo('pip3.5 install notebook=={} --no-cache-dir'.format(jupyter_version))
             sudo('pip3.5 install jupyter --no-cache-dir')
@@ -193,6 +198,98 @@
             print('Error:', str(err))
             sys.exit(1)
 
+
+def configure_docker(os_user):
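+    # Install a pinned docker-ce build from the official Docker apt repository, guarded by the docker_ensured marker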
+    try:
+        if not exists('/home/' + os_user + '/.ensure_dir/docker_ensured'):
+            docker_version = os.environ['ssn_docker_version']
+            sudo('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -')
+            sudo('add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) \
+                  stable"')
+            manage_pkg('update', 'remote', '')
+            sudo('apt-cache policy docker-ce')
+            manage_pkg('-y install', 'remote', 'docker-ce={}~ce~3-0~ubuntu'.format(docker_version))
+            sudo('touch /home/{}/.ensure_dir/docker_ensured'.format(os_user))
+    except Exception as err:
+        print('Failed to configure Docker:', str(err))
+        sys.exit(1)
+
+
+def ensure_jupyterlab_files(os_user, jupyterlab_dir, jupyterlab_image, jupyter_conf_file, jupyterlab_conf_file, exploratory_name, edge_ip):
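+    # First run: stage the JupyterLab Dockerfile and helper scripts and write the notebook config;
+    # later runs only refresh c.NotebookApp.base_url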
+    if not exists(jupyterlab_dir):
+        try:
+            sudo('mkdir {}'.format(jupyterlab_dir))
+#            put(templates_dir + 'pyspark_local_template.json', '/tmp/pyspark_local_template.json')
+#            put(templates_dir + 'py3spark_local_template.json', '/tmp/py3spark_local_template.json')
+            put('/root/Dockerfile_jupyterlab', '/tmp/Dockerfile_jupyterlab')
+            put('/root/scripts/*', '/tmp/')
+#            sudo('\cp /tmp/pyspark_local_template.json ' + jupyterlab_dir + 'pyspark_local_template.json')
+#            sudo('\cp /tmp/py3spark_local_template.json ' + jupyterlab_dir + 'py3spark_local_template.json')
+#            sudo('sed -i \'s/3.5/3.6/g\' {}py3spark_local_template.json'.format(jupyterlab_dir))
+            sudo('mv /tmp/jupyterlab_run.sh {}jupyterlab_run.sh'.format(jupyterlab_dir))
+            sudo('mv /tmp/Dockerfile_jupyterlab {}Dockerfile_jupyterlab'.format(jupyterlab_dir))
+            sudo('mv /tmp/build.sh {}build.sh'.format(jupyterlab_dir))
+            sudo('mv /tmp/start.sh {}start.sh'.format(jupyterlab_dir))
+            sudo('sed -i \'s/nb_user/{}/g\' {}Dockerfile_jupyterlab'.format(os_user, jupyterlab_dir))
+            sudo('sed -i \'s/jupyterlab_image/{}/g\' {}Dockerfile_jupyterlab'.format(jupyterlab_image, jupyterlab_dir))
+            sudo('sed -i \'s/nb_user/{}/g\' {}start.sh'.format(os_user, jupyterlab_dir))
+#            sudo('sed -i \'s/jup_version/{}/g\' {}Dockerfile_jupyterlab'.format(jupyter_version, jupyterlab_dir))
+#            sudo('sed -i \'s/hadoop_version/{}/g\' {}Dockerfile_jupyterlab'.format(os.environ['notebook_hadoop_version'], jupyterlab_dir))
+#            sudo('sed -i \'s/tornado_version/{}/g\' {}Dockerfile_jupyterlab'.format(os.environ['notebook_tornado_version'], jupyterlab_dir))
+#            sudo('sed -i \'s/matplotlib_version/{}/g\' {}Dockerfile_jupyterlab'.format(os.environ['notebook_matplotlib_version'], jupyterlab_dir))
+#            sudo('sed -i \'s/numpy_version/{}/g\' {}Dockerfile_jupyterlab'.format(os.environ['notebook_numpy_version'], jupyterlab_dir))
+#            sudo('sed -i \'s/spark_version/{}/g\' {}Dockerfile_jupyterlab'.format(os.environ['notebook_spark_version'], jupyterlab_dir))
+#            sudo('sed -i \'s/scala_version/{}/g\' {}Dockerfile_jupyterlab'.format(os.environ['notebook_scala_version'], jupyterlab_dir))
+            sudo('sed -i \'s/CONF_PATH/{}/g\' {}jupyterlab_run.sh'.format(jupyterlab_conf_file, jupyterlab_dir))
+            sudo('touch {}'.format(jupyter_conf_file))
+            sudo('echo "c.NotebookApp.ip = \'0.0.0.0\'" >> {}'.format(jupyter_conf_file))
+            sudo('echo "c.NotebookApp.base_url = \'/{0}/\'" >> {1}'.format(exploratory_name, jupyter_conf_file))
+            sudo('echo c.NotebookApp.open_browser = False >> {}'.format(jupyter_conf_file))
+            sudo('echo \'c.NotebookApp.cookie_secret = b"{0}"\' >> {1}'.format(id_generator(), jupyter_conf_file))
+            sudo('''echo "c.NotebookApp.token = u''" >> {}'''.format(jupyter_conf_file))
+            sudo('echo \'c.KernelSpecManager.ensure_native_kernel = False\' >> {}'.format(jupyter_conf_file))
+            sudo('chown dlab-user:dlab-user /opt')
+            sudo('echo -e "Host git.epam.com\n   HostName git.epam.com\n   ProxyCommand nc -X connect -x {}:3128 %h %p\n" > /home/{}/.ssh/config'.format(edge_ip, os_user))
+            sudo('echo -e "Host github.com\n   HostName github.com\n   ProxyCommand nc -X connect -x {}:3128 %h %p" >> /home/{}/.ssh/config'.format(edge_ip, os_user))
+#            sudo('touch {}'.format(spark_script))
+#            sudo('echo "#!/bin/bash" >> {}'.format(spark_script))
+#            sudo(
+#                'echo "PYJ=\`find /opt/spark/ -name \'*py4j*.zip\' | tr \'\\n\' \':\' | sed \'s|:$||g\'\`; sed -i \'s|PY4J|\'$PYJ\'|g\' /tmp/pyspark_local_template.json" >> {}'.format(
+#                spark_script))
+#            sudo(
+#                'echo "sed -i \'14s/:",/:\\/home\\/dlab-user\\/caffe\\/python:\\/home\\/dlab-user\\/pytorch\\/build:",/\' /tmp/pyspark_local_template.json" >> {}'.format(
+#                    spark_script))
+#            sudo('echo \'sed -i "s|SP_VER|{}|g" /tmp/pyspark_local_template.json\' >> {}'.format(os.environ['notebook_spark_version'], spark_script))
+#            sudo(
+#                'echo "PYJ=\`find /opt/spark/ -name \'*py4j*.zip\' | tr \'\\n\' \':\' | sed \'s|:$||g\'\`; sed -i \'s|PY4J|\'$PYJ\'|g\' /tmp/py3spark_local_template.json" >> {}'.format(
+#                spark_script))
+#            sudo(
+#                'echo "sed -i \'14s/:",/:\\/home\\/dlab-user\\/caffe\\/python:\\/home\\/dlab-user\\/pytorch\\/build:",/\' /tmp/py3spark_local_template.json" >> {}'.format(
+#                    spark_script))
+#            sudo('echo \'sed -i "s|SP_VER|{}|g" /tmp/py3spark_local_template.json\' >> {}'.format(os.environ['notebook_spark_version'], spark_script))
+#            sudo('echo "cp /tmp/pyspark_local_template.json /home/{}/.local/share/jupyter/kernels/pyspark_local/kernel.json" >> {}'.format(os_user, spark_script))
+#            sudo(
+#                'echo "cp /tmp/py3spark_local_template.json /home/{}/.local/share/jupyter/kernels/py3spark_local/kernel.json" >> {}'.format(
+#                    os_user, spark_script))
+#            sudo('git clone https://github.com/legion-platform/legion.git')
+#            sudo('cp {}sdk/Pipfile {}sdk_Pipfile'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp {}sdk/Pipfile.lock {}sdk_Pipfile.lock'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp {}toolchains/python/Pipfile {}toolchains_Pipfile'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp {}toolchains/python/Pipfile.lock {}toolchains_Pipfile.lock'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp {}cli/Pipfile {}cli_Pipfile'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp {}cli/Pipfile.lock {}cli_Pipfile.lock'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp -r {}sdk {}sdk'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp -r {}toolchains/python {}toolchains_python'.format(legion_dir, jupyterlab_dir))
+#            sudo('cp -r {}cli {}cli'.format(legion_dir, jupyterlab_dir))
+        except Exception as err:
+            print('Error:', str(err))
+            sys.exit(1)
+    else:
+        try:
+            sudo(
+                'sed -i "s/c.NotebookApp.base_url =.*/c.NotebookApp.base_url = \'\/{0}\/\'/" {1}'.format(
+                    exploratory_name, jupyter_conf_file))
+        except Exception as err:
+            print('Error:', str(err))
+            sys.exit(1)
+
 
 def ensure_pyspark_local_kernel(os_user, pyspark_local_path_dir, templates_dir, spark_version):
     if not exists('/home/' + os_user + '/.ensure_dir/pyspark_local_kernel_ensured'):
@@ -295,8 +392,8 @@
     try:
         for r_pkg in requisites:
             if r_pkg == 'sparklyr':
-                run('sudo -i R -e \'install.packages("{0}", repos="http://cran.us.r-project.org", dep=TRUE)\' 2>&1 | tee /tmp/tee.tmp; if ! grep -w -E  "({1})" /tmp/tee.tmp > /tmp/install_{0}.log; then  echo "" > /tmp/install_{0}.log;fi'.format(r_pkg, error_parser))
-            sudo('R -e \'install.packages("{0}", repos="http://cran.us.r-project.org", dep=TRUE)\' 2>&1 | tee /tmp/tee.tmp; if ! grep -w -E  "({1})" /tmp/tee.tmp >  /tmp/install_{0}.log; then  echo "" > /tmp/install_{0}.log;fi'.format(r_pkg, error_parser))
+                run('sudo -i R -e \'install.packages("{0}", repos="https://cloud.r-project.org", dep=TRUE)\' 2>&1 | tee /tmp/tee.tmp; if ! grep -w -E  "({1})" /tmp/tee.tmp > /tmp/install_{0}.log; then  echo "" > /tmp/install_{0}.log;fi'.format(r_pkg, error_parser))
+            sudo('R -e \'install.packages("{0}", repos="https://cloud.r-project.org", dep=TRUE)\' 2>&1 | tee /tmp/tee.tmp; if ! grep -w -E  "({1})" /tmp/tee.tmp >  /tmp/install_{0}.log; then  echo "" > /tmp/install_{0}.log;fi'.format(r_pkg, error_parser))
             err = sudo('cat /tmp/install_{0}.log'.format(r_pkg)).replace('"', "'")
             sudo('R -e \'installed.packages()[,c(3:4)]\' | if ! grep -w {0} > /tmp/install_{0}.list; then  echo "" > /tmp/install_{0}.list;fi'.format(r_pkg))
             res = sudo('cat /tmp/install_{0}.list'.format(r_pkg))
@@ -378,7 +475,7 @@
 def get_available_r_pkgs():
     try:
         r_pkgs = dict()
-        sudo('R -e \'write.table(available.packages(contriburl="http://cran.us.r-project.org/src/contrib"), file="/tmp/r.csv", row.names=F, col.names=F, sep=",")\'')
+        sudo('R -e \'write.table(available.packages(contriburl="https://cloud.r-project.org/src/contrib"), file="/tmp/r.csv", row.names=F, col.names=F, sep=",")\'')
         get("/tmp/r.csv", "r.csv")
         with open('r.csv', 'rb') as csvfile:
             reader = csv.reader(csvfile, delimiter=',')
@@ -409,7 +506,7 @@
 def install_ungit(os_user, notebook_name, edge_ip):
     if not exists('/home/{}/.ensure_dir/ungit_ensured'.format(os_user)):
         try:
-            sudo('npm -g install ungit@{}'.format(os.environ['notebook_ungit_version']))
+            manage_npm_pkg('-g install ungit@{}'.format(os.environ['notebook_ungit_version']))
             put('/root/templates/ungit.service', '/tmp/ungit.service')
             sudo("sed -i 's|OS_USR|{}|' /tmp/ungit.service".format(os_user))
             http_proxy = run('echo $http_proxy')
@@ -458,7 +555,7 @@
     run('git config --global https.proxy $https_proxy')
 
 
-def install_inactivity_checker(os_user, ip_adress, rstudio=False):
+def install_inactivity_checker(os_user, ip_address, rstudio=False):
     if not exists('/home/{}/.ensure_dir/inactivity_ensured'.format(os_user)):
         try:
             if not exists('/opt/inactivity'):
@@ -469,7 +566,7 @@
                 put('/root/templates/inactive_rs.sh', '/opt/inactivity/inactive.sh', use_sudo=True)
             else:
                 put('/root/templates/inactive.sh', '/opt/inactivity/inactive.sh', use_sudo=True)
-            sudo("sed -i 's|IP_ADRESS|{}|g' /opt/inactivity/inactive.sh".format(ip_adress))
+            sudo("sed -i 's|IP_ADRESS|{}|g' /opt/inactivity/inactive.sh".format(ip_address))
             sudo("chmod 755 /opt/inactivity/inactive.sh")
             sudo("chown root:root /etc/systemd/system/inactive.service")
             sudo("chown root:root /etc/systemd/system/inactive.timer")
@@ -498,11 +595,11 @@
 
 def install_r_packages(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/r_packages_ensured'):
-        sudo('R -e "install.packages(\'devtools\', repos = \'http://cran.us.r-project.org\')"')
-        sudo('R -e "install.packages(\'knitr\', repos = \'http://cran.us.r-project.org\')"')
-        sudo('R -e "install.packages(\'ggplot2\', repos = \'http://cran.us.r-project.org\')"')
+        sudo('R -e "install.packages(\'devtools\', repos = \'https://cloud.r-project.org\')"')
+        sudo('R -e "install.packages(\'knitr\', repos = \'https://cloud.r-project.org\')"')
+        sudo('R -e "install.packages(\'ggplot2\', repos = \'https://cloud.r-project.org\')"')
         sudo('R -e "install.packages(c(\'devtools\',\'mplot\', \'googleVis\'), '
-             'repos = \'http://cran.us.r-project.org\'); require(devtools); install_github(\'ramnathv/rCharts\')"')
+             'repos = \'https://cloud.r-project.org\'); require(devtools); install_github(\'ramnathv/rCharts\')"')
         sudo('touch /home/' + os_user + '/.ensure_dir/r_packages_ensured')
 
 
@@ -512,19 +609,19 @@
             breeze_tmp_dir = '/tmp/breeze_tmp_local/'
             jars_dir = '/opt/jars/'
             sudo('mkdir -p {}'.format(breeze_tmp_dir))
-            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze_{0}/{1}/breeze_{0}-{1}.jar -O \
+            sudo('wget https://repo1.maven.org/maven2/org/scalanlp/breeze_{0}/{1}/breeze_{0}-{1}.jar -O \
                     {2}breeze_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
-            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-natives_{0}/{1}/breeze-natives_{0}-{1}.jar -O \
+            sudo('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-natives_{0}/{1}/breeze-natives_{0}-{1}.jar -O \
                     {2}breeze-natives_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
-            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-viz_{0}/{1}/breeze-viz_{0}-{1}.jar -O \
+            sudo('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-viz_{0}/{1}/breeze-viz_{0}-{1}.jar -O \
                     {2}breeze-viz_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
-            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-macros_{0}/{1}/breeze-macros_{0}-{1}.jar -O \
+            sudo('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-macros_{0}/{1}/breeze-macros_{0}-{1}.jar -O \
                     {2}breeze-macros_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
-            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-parent_{0}/{1}/breeze-parent_{0}-{1}.jar -O \
+            sudo('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-parent_{0}/{1}/breeze-parent_{0}-{1}.jar -O \
                     {2}breeze-parent_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
-            sudo('wget http://central.maven.org/maven2/org/jfree/jfreechart/{0}/jfreechart-{0}.jar -O \
+            sudo('wget https://repo1.maven.org/maven2/org/jfree/jfreechart/{0}/jfreechart-{0}.jar -O \
                     {1}jfreechart-{0}.jar'.format('1.0.19', breeze_tmp_dir))
-            sudo('wget http://central.maven.org/maven2/org/jfree/jcommon/{0}/jcommon-{0}.jar -O \
+            sudo('wget https://repo1.maven.org/maven2/org/jfree/jcommon/{0}/jcommon-{0}.jar -O \
                     {1}jcommon-{0}.jar'.format('1.0.24', breeze_tmp_dir))
             sudo('wget --no-check-certificate https://brunelvis.org/jar/spark-kernel-brunel-all-{0}.jar -O \
                     {1}spark-kernel-brunel-all-{0}.jar'.format('2.3', breeze_tmp_dir))
@@ -544,6 +641,8 @@
         sudo('ln -s /usr/bin/pip-3.4 /usr/bin/pip3')
     elif not exists('/usr/bin/pip3') and sudo("python3.5 -V 2>/dev/null | awk '{print $2}'"):
         sudo('ln -s /usr/bin/pip-3.5 /usr/bin/pip3')
+    elif not exists('/usr/bin/pip3') and sudo("python3.6 -V 2>/dev/null | awk '{print $2}'"):
+        sudo('ln -s /usr/bin/pip-3.6 /usr/bin/pip3')
     sudo('echo "export PATH=$PATH:/usr/local/bin" >> /etc/profile')
     sudo('source /etc/profile')
     run('source /etc/profile')
@@ -725,4 +824,74 @@
             sudo('touch /home/{}/.ensure_dir/hosts_file_updated'.format(os_user))
     except Exception as err:
         print('Failed to update hosts file', str(err))
+        sys.exit(1)
+
+def ensure_docker_compose(os_user):
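+    # Installs a pinned docker-compose release binary once per instance (an
+    # ensure-file marker keeps the call idempotent), then reloads systemd and
+    # restarts the docker daemon.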
+    try:
+        configure_docker(os_user)
+        if not exists('/home/{}/.ensure_dir/docker_compose_ensured'.format(os_user)):
+            docker_compose_version = "1.24.1"
+            sudo('curl -L https://github.com/docker/compose/releases/download/{}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose'.format(docker_compose_version))
+            sudo('chmod +x /usr/local/bin/docker-compose')
+            sudo('touch /home/{}/.ensure_dir/docker_compose_ensured'.format(os_user))
+        sudo('systemctl daemon-reload')
+        sudo('systemctl restart docker')
+        return True
+    except:
+        return False
+
+def configure_superset(os_user, keycloak_auth_server_url, keycloak_realm_name, keycloak_client_id, keycloak_client_secret, edge_instance_private_ip, edge_instance_public_ip, superset_name):
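+    # Downloads the pinned incubator-superset release and renders the Docker
+    # templates by sed-substituting the Keycloak client settings and the edge
+    # proxy address before copying them into contrib/docker/.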
+    print('Configuring Superset')
+    try:
+        if not exists('/home/{}/incubator-superset'.format(os_user)):
+            with cd('/home/{}'.format(os_user)):
+                sudo('wget https://github.com/apache/incubator-superset/archive/{}.tar.gz'.format(os.environ['notebook_superset_version']))
+                sudo('tar -xzf {}.tar.gz'.format(os.environ['notebook_superset_version']))
+                sudo('ln -sf incubator-superset-{} incubator-superset'.format(os.environ['notebook_superset_version']))
+        if not exists('/tmp/superset-notebook_installed'):
+            sudo('mkdir -p /opt/dlab/templates')
+            put('/root/templates', '/opt/dlab', use_sudo=True)
+            sudo('sed -i \'s/OS_USER/{}/g\' /opt/dlab/templates/.env'.format(os_user))
+            proxy_string = '{}:3128'.format(edge_instance_private_ip)
+            sudo('sed -i \'s|KEYCLOAK_AUTH_SERVER_URL|{}|g\' /opt/dlab/templates/id_provider.json'.format(keycloak_auth_server_url))
+            sudo('sed -i \'s/KEYCLOAK_REALM_NAME/{}/g\' /opt/dlab/templates/id_provider.json'.format(keycloak_realm_name))
+            sudo('sed -i \'s/CLIENT_ID/{}/g\' /opt/dlab/templates/id_provider.json'.format(keycloak_client_id))
+            sudo('sed -i \'s/CLIENT_SECRET/{}/g\' /opt/dlab/templates/id_provider.json'.format(keycloak_client_secret))
+            sudo('sed -i \'s/PROXY_STRING/{}/g\' /opt/dlab/templates/docker-compose.yml'.format(proxy_string))
+            sudo('sed -i \'s|KEYCLOAK_AUTH_SERVER_URL|{}|g\' /opt/dlab/templates/superset_config.py'.format(keycloak_auth_server_url))
+            sudo('sed -i \'s/KEYCLOAK_REALM_NAME/{}/g\' /opt/dlab/templates/superset_config.py'.format(keycloak_realm_name))
+            sudo('sed -i \'s/EDGE_IP/{}/g\' /opt/dlab/templates/superset_config.py'.format(edge_instance_public_ip))
+            sudo('sed -i \'s/SUPERSET_NAME/{}/g\' /opt/dlab/templates/superset_config.py'.format(superset_name))
+            sudo('cp -f /opt/dlab/templates/.env /home/{}/incubator-superset/contrib/docker/'.format(os_user))
+            sudo('cp -f /opt/dlab/templates/docker-compose.yml /home/{}/incubator-superset/contrib/docker/'.format(os_user))
+            sudo('cp -f /opt/dlab/templates/id_provider.json /home/{}/incubator-superset/contrib/docker/'.format(os_user))
+            sudo('cp -f /opt/dlab/templates/requirements-extra.txt /home/{}/incubator-superset/contrib/docker/'.format(os_user))
+            sudo('cp -f /opt/dlab/templates/superset_config.py /home/{}/incubator-superset/contrib/docker/'.format(os_user))
+            sudo('cp -f /opt/dlab/templates/docker-init.sh /home/{}/incubator-superset/contrib/docker/'.format(os_user))
+            sudo('touch /tmp/superset-notebook_installed')
+    except Exception as err:
+        print("Failed configure superset: " + str(err))
+        sys.exit(1)
+
+def manage_npm_pkg(command):
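+    # Retries the given npm command, alternating between the npmjs.org and
+    # npmjs.com registry mirrors and sleeping 50s between failures, giving up
+    # after 60 attempts.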
+    try:
+        npm_count = 0
+        installed = False
+        npm_registry = ['https://registry.npmjs.org/', 'https://registry.npmjs.com/']
+        while not installed:
+            if npm_count > 60:
+                print("NPM registry is not available, please try later")
+                sys.exit(1)
+            else:
+                try:
+                    if npm_count % 2 == 0:
+                        sudo('npm config set registry {}'.format(npm_registry[0]))
+                    else:
+                        sudo('npm config set registry {}'.format(npm_registry[1]))
+                    sudo('npm {}'.format(command))
+                    installed = True
+                except:
+                    npm_count += 1
+                    time.sleep(50)
+    except:
         sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/lib/os/redhat/common_lib.py b/infrastructure-provisioning/src/general/lib/os/redhat/common_lib.py
index 12d2116..ea5d4f2 100644
--- a/infrastructure-provisioning/src/general/lib/os/redhat/common_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/redhat/common_lib.py
@@ -26,6 +26,34 @@
 import sys
 import os
+import time  # used by the retry loop in manage_pkg below
 
+def manage_pkg(command, environment, requisites):
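+    # Serializes yum invocations: polls pgrep until no other yum process is
+    # running (10s between checks, up to 60 attempts), then executes the
+    # requested command on the remote host or locally.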
+    try:
+        allow = False
+        counter = 0
+        while not allow:
+            if counter > 60:
+                print("Notebook is broken please recreate it.")
+                sys.exit(1)
+            else:
+                print('Package manager is:')
+                if environment == 'remote':
+                    if sudo('pgrep yum -a && echo "busy" || echo "ready"') == 'busy':
+                        counter += 1
+                        time.sleep(10)
+                    else:
+                        allow = True
+                        sudo('yum {0} {1}'.format(command, requisites))
+                elif environment == 'local':
+                    if local('sudo pgrep yum -a && echo "busy" || echo "ready"', capture=True) == 'busy':
+                        counter += 1
+                        time.sleep(10)
+                    else:
+                        allow = True
+                        local('sudo yum {0} {1}'.format(command, requisites), capture=True)
+                else:
+                    print('Wrong environment')
+                    sys.exit(1)  # avoid looping forever on a bad environment value
+    except:
+        sys.exit(1)
 
 def ensure_pkg(user, requisites='git vim gcc python-devel openssl-devel nmap libffi libffi-devel unzip libxml2-devel'):
     try:
@@ -45,15 +73,15 @@
                 sudo('echo "gpgcheck=1" >> centOS-base.repo')
                 sudo('echo "gpgkey=http://{}/centos/7/os/x86_64/RPM-GPG-KEY-CentOS-7" >> centOS-base.repo'.format(mirror))
             sudo('yum-config-manager --enable rhui-REGION-rhel-server-optional')
-            sudo('yum update-minimal --security -y')
-            sudo('yum -y install wget')
+            manage_pkg('update-minimal --security -y', 'remote', '')
+            manage_pkg('-y install', 'remote', 'wget')
             sudo('wget --no-check-certificate https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm')
             sudo('rpm -ivh epel-release-latest-7.noarch.rpm')
-            sudo('yum repolist')
-            sudo('yum -y install python-pip gcc')
+            manage_pkg('repolist', 'remote', '')
+            manage_pkg('-y install', 'remote', 'python-pip gcc')
             sudo('rm -f epel-release-latest-7.noarch.rpm')
             sudo('export LC_ALL=C')
-            sudo('yum -y install ' + requisites)
+            manage_pkg('-y install', 'remote', requisites)
             sudo('touch /home/{}/.ensure_dir/pkg_upgraded'.format(user))
     except:
         sys.exit(1)
@@ -80,7 +108,7 @@
     try:
         if not exists('/home/{}/.ensure_dir/ntpd_ensured'.format(user)):
             sudo('systemctl disable chronyd')
-            sudo('yum -y install ntp')
+            manage_pkg('-y install', 'remote', 'ntp')
             sudo('echo "tinker panic 0" >> /etc/ntp.conf')
             sudo('systemctl start ntpd')
             if os.environ['conf_resource'] != 'ssn' and os.environ['conf_resource'] != 'edge':
@@ -89,4 +117,26 @@
             sudo('systemctl enable ntpd')
             sudo('touch /home/{}/.ensure_dir/ntpd_ensured'.format(user))
     except:
-        sys.exit(1)
\ No newline at end of file
+        sys.exit(1)
+
+
+def ensure_java(user):
+    try:
+        if not exists('/home/{}/.ensure_dir/java_ensured'.format(user)):
+            manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk-devel')
+            sudo('touch /home/{}/.ensure_dir/java_ensured'.format(user))
+    except:
+        sys.exit(1)
+
+
+def ensure_step(user):
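+    # Fetches the pinned smallstep CLI tarball (v0.13.3) and installs the
+    # step binary into /usr/bin, guarded by an ensure-file marker.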
+    try:
+        if not exists('/home/{}/.ensure_dir/step_ensured'.format(user)):
+            manage_pkg('-y install', 'remote', 'wget')
+            sudo('wget https://github.com/smallstep/cli/releases/download/v0.13.3/step_0.13.3_linux_amd64.tar.gz '
+                 '-O /tmp/step_0.13.3_linux_amd64.tar.gz')
+            sudo('tar zxvf /tmp/step_0.13.3_linux_amd64.tar.gz -C /tmp/')
+            sudo('mv /tmp/step_0.13.3/bin/step /usr/bin/')
+            sudo('touch /home/{}/.ensure_dir/step_ensured'.format(user))
+    except:
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/lib/os/redhat/edge_lib.py b/infrastructure-provisioning/src/general/lib/os/redhat/edge_lib.py
index 1a6f773..8dde808 100644
--- a/infrastructure-provisioning/src/general/lib/os/redhat/edge_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/redhat/edge_lib.py
@@ -25,12 +25,13 @@
 import sys
 from fabric.api import *
 from fabric.contrib.files import exists
+from dlab.common_lib import manage_pkg
 
 
 def configure_http_proxy_server(config):
     try:
         if not exists('/tmp/http_proxy_ensured'):
-            sudo('yum -y install squid')
+            manage_pkg('-y install', 'remote', 'squid')
             template_file = config['template_file']
             proxy_subnet = config['exploratory_subnet']
             put(template_file, '/tmp/squid.conf')
@@ -58,45 +59,119 @@
         sys.exit(1)
 
 
-def install_nginx_ldap(edge_ip, nginx_version, ldap_ip, ldap_dn, ldap_ou, ldap_service_pass, ldap_service_username):
+def install_nginx_lua(edge_ip, nginx_version, keycloak_auth_server_url, keycloak_realm_name, keycloak_client_id,
+                      keycloak_client_secret, user, hostname, step_cert_sans):
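+    # Builds nginx from source with the Lua stack and configures it as a
+    # reverse proxy in front of the edge node, authenticating against
+    # Keycloak via OpenID Connect (lua-resty-openidc) instead of the
+    # previous LDAP module.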
     try:
         if not os.path.exists('/tmp/nginx_installed'):
-            sudo('yum install -y wget')
+            manage_pkg('-y install', 'remote', 'wget')
             sudo('wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm')
             try:
                 sudo('rpm -ivh epel-release-latest-7.noarch.rpm')
             except:
                 print('Looks like EPEL is already installed.')
-            sudo(
-                'yum -y install gcc gcc-c++ make zlib-devel pcre-devel openssl-devel git openldap-devel')
-            sudo('mkdir -p /tmp/nginx_auth_ldap')
-            with cd('/tmp/nginx_auth_ldap'):
-                sudo('git clone https://github.com/kvspb/nginx-auth-ldap.git')
+            manage_pkg('-y install', 'remote', 'gcc gcc-c++ make zlib-devel pcre-devel openssl-devel git openldap-devel')
+            if os.environ['conf_stepcerts_enabled'] == 'true':
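+                # step-ca flow: bootstrap trust from the CA fingerprint, request
+                # a token with the provisioner password, issue the edge
+                # certificate with the requested SANs, and set up a cron job and
+                # systemd unit that renew it via manage_step_certs.sh.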
+                sudo('mkdir -p /home/{0}/keys'.format(user))
+                sudo('''bash -c 'echo "{0}" | base64 --decode > /etc/ssl/certs/root_ca.crt' '''.format(
+                     os.environ['conf_stepcerts_root_ca']))
+                fingerprint = sudo('step certificate fingerprint /etc/ssl/certs/root_ca.crt')
+                sudo('step ca bootstrap --fingerprint {0} --ca-url "{1}"'.format(fingerprint,
+                                                                                 os.environ['conf_stepcerts_ca_url']))
+                sudo('echo "{0}" > /home/{1}/keys/provisioner_password'.format(
+                     os.environ['conf_stepcerts_kid_password'], user))
+                sans = "--san localhost --san 127.0.0.1 {0}".format(step_cert_sans)
+                cn = edge_ip
+                sudo('step ca token {3} --kid {0} --ca-url "{1}" --root /etc/ssl/certs/root_ca.crt '
+                     '--password-file /home/{2}/keys/provisioner_password {4} --output-file /tmp/step_token'.format(
+                      os.environ['conf_stepcerts_kid'], os.environ['conf_stepcerts_ca_url'], user, cn, sans))
+                token = sudo('cat /tmp/step_token')
+                sudo('step ca certificate "{0}" /etc/ssl/certs/dlab.crt /etc/ssl/certs/dlab.key '
+                     '--token "{1}" --kty=RSA --size 2048 --provisioner {2} '.format(cn, token,
+                                                                                     os.environ['conf_stepcerts_kid']))
+                sudo('touch /var/log/renew_certificates.log')
+                put('/root/templates/manage_step_certs.sh', '/usr/local/bin/manage_step_certs.sh', use_sudo=True)
+                sudo('chmod +x /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_ROOT_CERT_PATH|/etc/ssl/certs/root_ca.crt|g" '
+                     '/usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_CERT_PATH|/etc/ssl/certs/dlab.crt|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_KEY_PATH|/etc/ssl/certs/dlab.key|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_CA_URL|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(
+                    os.environ['conf_stepcerts_ca_url']))
+                sudo('sed -i "s|RESOURCE_TYPE|edge|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|SANS|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(sans))
+                sudo('sed -i "s|CN|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(cn))
+                sudo('sed -i "s|KID|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(
+                    os.environ['conf_stepcerts_kid']))
+                sudo('sed -i "s|STEP_PROVISIONER_PASSWORD_PATH|/home/{0}/keys/provisioner_password|g" '
+                     '/usr/local/bin/manage_step_certs.sh'.format(user))
+                sudo('bash -c \'echo "0 * * * * root /usr/local/bin/manage_step_certs.sh >> '
+                     '/var/log/renew_certificates.log 2>&1" >> /etc/crontab \'')
+                put('/root/templates/step-cert-manager.service', '/etc/systemd/system/step-cert-manager.service',
+                    use_sudo=True)
+                sudo('systemctl daemon-reload')
+                sudo('systemctl enable step-cert-manager.service')
+            else:
+                sudo('openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout /etc/ssl/certs/dlab.key \
+                     -out /etc/ssl/certs/dlab.crt -subj "/C=US/ST=US/L=US/O=dlab/CN={}"'.format(hostname))
+            sudo('mkdir -p /tmp/lua')
             sudo('mkdir -p /tmp/src')
             with cd('/tmp/src/'):
                 sudo('wget http://nginx.org/download/nginx-{}.tar.gz'.format(nginx_version))
                 sudo('tar -xzf nginx-{}.tar.gz'.format(nginx_version))
+
+                sudo('wget https://github.com/openresty/lua-nginx-module/archive/v0.10.15.tar.gz')
+                sudo('tar -xzf v0.10.15.tar.gz')
+
+                sudo('wget https://github.com/simplresty/ngx_devel_kit/archive/v0.3.1.tar.gz')
+                sudo('tar -xzf v0.3.1.tar.gz')
+
+                sudo('wget http://luajit.org/download/LuaJIT-2.0.5.tar.gz')
+                sudo('tar -xzf LuaJIT-2.0.5.tar.gz')
+
+                sudo('wget http://keplerproject.github.io/luarocks/releases/luarocks-2.2.2.tar.gz')
+                sudo('tar -xzf luarocks-2.2.2.tar.gz')
+
                 sudo('ln -sf nginx-{} nginx'.format(nginx_version))
-            with cd('/tmp/src/nginx/'):
-                sudo('./configure --user=nginx --group=nginx --prefix=/etc/nginx --sbin-path=/usr/sbin/nginx \
-                              --conf-path=/etc/nginx/nginx.conf --pid-path=/run/nginx.pid --lock-path=/run/lock/subsys/nginx \
-                              --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log \
-                              --with-http_gzip_static_module --with-http_stub_status_module --with-http_ssl_module --with-pcre \
-                              --with-http_realip_module --with-file-aio --with-ipv6 --with-http_v2_module --with-debug \
-                              --without-http_scgi_module --without-http_uwsgi_module --without-http_fastcgi_module --with-http_sub_module \
-                              --add-module=/tmp/nginx_auth_ldap/nginx-auth-ldap/')
+
+            with cd('/tmp/src/LuaJIT-2.0.5/'):
                 sudo('make')
                 sudo('make install')
+
+            with cd('/tmp/src/nginx/'), shell_env(LUAJIT_LIB='/usr/local/lib/', LUAJIT_INC='/usr/local/include/luajit-2.0'):
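+                # nginx is built against LuaJIT (LUAJIT_LIB/LUAJIT_INC above),
+                # with ngx_devel_kit and lua-nginx-module added as dynamic
+                # modules and the LuaJIT path embedded via -Wl,-rpath.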
+                sudo('./configure --user=nginx --group=nginx --prefix=/etc/nginx --sbin-path=/usr/sbin/nginx \
+                                              --conf-path=/etc/nginx/nginx.conf --pid-path=/run/nginx.pid --lock-path=/run/lock/subsys/nginx \
+                                              --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log \
+                                              --with-http_gzip_static_module --with-http_stub_status_module --with-http_ssl_module --with-pcre \
+                                              --with-http_realip_module --with-file-aio --with-ipv6 --with-http_v2_module --with-ld-opt="-Wl,-rpath,$LUAJIT_LIB"  \
+                                              --without-http_scgi_module --without-http_uwsgi_module --without-http_fastcgi_module --with-http_sub_module \
+                                              --add-dynamic-module=/tmp/src/ngx_devel_kit-0.3.1 --add-dynamic-module=/tmp/src/lua-nginx-module-0.10.15')
+                sudo('make')
+                sudo('make install')
+
+            with cd('/tmp/src/luarocks-2.2.2/'):
+                sudo('./configure --with-lua-include=/usr/local/include/luajit-2.0')
+                sudo('make build')
+                sudo('make install')
+                sudo('luarocks install lua-resty-jwt')
+                sudo('luarocks install lua-resty-session')
+                sudo('luarocks install lua-resty-http')
+                sudo('luarocks install lua-resty-openidc')
+                sudo('luarocks install luacrypto')
+                sudo('luarocks install lua-cjson')
+                sudo('luarocks install lua-resty-core')
+                sudo('luarocks install random')
+                sudo('luarocks install lua-resty-string')
+
             sudo('useradd -r nginx')
             sudo('rm -f /etc/nginx/nginx.conf')
             sudo('mkdir -p /opt/dlab/templates')
             put('/root/templates', '/opt/dlab', use_sudo=True)
-            sudo('sed -i \'s/LDAP_IP/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_ip))
-            sudo('sed -i \'s/LDAP_DN/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_dn))
-            sudo('sed -i \'s/LDAP_OU/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_ou))
-            sudo('sed -i \'s/LDAP_SERVICE_PASSWORD/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_service_pass))
-            sudo('sed -i \'s/LDAP_SERVICE_USERNAME/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_service_username))
             sudo('sed -i \'s/EDGE_IP/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(edge_ip))
+            sudo('sed -i \'s|KEYCLOAK_AUTH_URL|{}|g\' /opt/dlab/templates/conf.d/proxy.conf'.format(keycloak_auth_server_url))
+            sudo('sed -i \'s/KEYCLOAK_REALM_NAME/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(keycloak_realm_name))
+            sudo('sed -i \'s/KEYCLOAK_CLIENT_ID/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(keycloak_client_id))
+            sudo('sed -i \'s/KEYCLOAK_CLIENT_SECRET/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(keycloak_client_secret))
+
             sudo('cp /opt/dlab/templates/nginx.conf /etc/nginx/')
             sudo('mkdir /etc/nginx/conf.d')
             sudo('cp /opt/dlab/templates/conf.d/proxy.conf /etc/nginx/conf.d/')
diff --git a/infrastructure-provisioning/src/general/lib/os/redhat/notebook_lib.py b/infrastructure-provisioning/src/general/lib/os/redhat/notebook_lib.py
index 21d4666..34bae34 100644
--- a/infrastructure-provisioning/src/general/lib/os/redhat/notebook_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/redhat/notebook_lib.py
@@ -31,6 +31,7 @@
 from dlab.notebook_lib import *
 from dlab.fab import *
 import os, time
+from dlab.common_lib import manage_pkg
 
 
 def enable_proxy(proxy_host, proxy_port):
@@ -43,7 +44,7 @@
         if exists('/etc/yum.conf'):
             sudo('sed -i "/^proxy=/d" /etc/yum.conf')
         sudo("echo 'proxy={}' >> /etc/yum.conf".format(proxy_string))
-        sudo('yum clean all')
+        manage_pkg('clean all', 'remote', '')
     except:
         sys.exit(1)
 
@@ -65,7 +66,7 @@
             run('R -e "IRkernel::installspec()"')
             sudo('ln -s /opt/spark/ /usr/local/spark')
             try:
-                sudo('cd /usr/local/spark/R/lib/SparkR; R -e "install.packages(\'roxygen2\',repos=\'http://cran.us.r-project.org\')" R -e "devtools::check(\'.\')"')
+                sudo('cd /usr/local/spark/R/lib/SparkR; R -e "install.packages(\'roxygen2\',repos=\'https://cloud.r-project.org\')" R -e "devtools::check(\'.\')"')
             except:
                 pass
             sudo('cd /usr/local/spark/R/lib/SparkR; R -e "devtools::install(\'.\')"')
@@ -87,11 +88,11 @@
             if region == 'cn-north-1':
                 r_repository = r_mirror
             else:
-                r_repository = 'http://cran.us.r-project.org'
-            sudo('yum install -y cmake')
-            sudo('yum -y install libcur*')
+                r_repository = 'https://cloud.r-project.org'
+            manage_pkg('-y install', 'remote', 'cmake')
+            manage_pkg('-y install', 'remote', 'libcur*')
             sudo('echo -e "[base]\nname=CentOS-7-Base\nbaseurl=http://buildlogs.centos.org/centos/7/os/x86_64-20140704-1/\ngpgcheck=1\ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7\npriority=1\nexclude=php mysql" >> /etc/yum.repos.d/CentOS-base.repo')
-            sudo('yum install -y R R-core R-core-devel R-devel --nogpgcheck')
+            manage_pkg('-y install', 'remote', 'R R-core R-core-devel R-devel --nogpgcheck')
             sudo('R CMD javareconf')
             sudo('cd /root; git clone https://github.com/zeromq/zeromq4-x.git; cd zeromq4-x/; mkdir build; cd build; cmake ..; make install; ldconfig')
             for i in r_libs:
@@ -108,13 +109,13 @@
 def install_rstudio(os_user, local_spark_path, rstudio_pass, rstudio_version):
     if not exists('/home/' + os_user + '/.ensure_dir/rstudio_ensured'):
         try:
-            sudo('yum install -y --nogpgcheck https://download2.rstudio.org/rstudio-server-rhel-{}-x86_64.rpm'.format(rstudio_version))
+            manage_pkg('-y install --nogpgcheck', 'remote', 'https://download2.rstudio.org/server/centos6/x86_64/rstudio-server-rhel-{}-x86_64.rpm'.format(rstudio_version))
             sudo('mkdir -p /mnt/var')
             sudo('chown {0}:{0} /mnt/var'.format(os_user))
-            if os.environ['application'] == 'tensor-rstudio':
-                sudo("sed -i '/ExecStart/s|=|=/bin/bash -c \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; |g' /etc/systemd/system/rstudio-server.service")
-                sudo("sed -i '/ExecStart/s|$|\"|g' /etc/systemd/system/rstudio-server.service")
-                sudo("systemctl daemon-reload")
+            sudo("sed -i '/Type=forking/a \Environment=USER=dlab-user' /etc/systemd/system/rstudio-server.service")
+            sudo("sed -i '/ExecStart/s|=/usr/lib/rstudio-server/bin/rserver|=/bin/bash -c \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; /usr/lib/rstudio-server/bin/rserver --auth-none 1|g' /etc/systemd/system/rstudio-server.service")
+            sudo("sed -i '/ExecStart/s|$|\"|g' /etc/systemd/system/rstudio-server.service")
+            sudo("systemctl daemon-reload")
             sudo('touch /home/{}/.Renviron'.format(os_user))
             sudo('chown {0}:{0} /home/{0}/.Renviron'.format(os_user))
             sudo('''echo 'SPARK_HOME="{0}"' >> /home/{1}/.Renviron'''.format(local_spark_path, os_user))
@@ -157,7 +158,7 @@
     if not exists('/home/{}/.ensure_dir/sbt_ensured'.format(os_user)):
         try:
             sudo('curl https://bintray.com/sbt/rpm/rpm | sudo tee /etc/yum.repos.d/bintray-sbt-rpm.repo')
-            sudo('yum install -y sbt')
+            manage_pkg('-y install', 'remote', 'sbt')
             sudo('touch /home/{}/.ensure_dir/sbt_ensured'.format(os_user))
         except:
             sys.exit(1)
@@ -166,8 +167,8 @@
 def ensure_jre_jdk(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/jre_jdk_ensured'):
         try:
-            sudo('yum install -y java-1.8.0-openjdk')
-            sudo('yum install -y java-1.8.0-openjdk-devel')
+            manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk')
+            manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk-devel')
             sudo('touch /home/' + os_user + '/.ensure_dir/jre_jdk_ensured')
         except:
             sys.exit(1)
@@ -186,8 +187,8 @@
 def ensure_additional_python_libs(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/additional_python_libs_ensured'):
         try:
-            sudo('yum clean all')
-            sudo('yum install -y zlib-devel libjpeg-turbo-devel --nogpgcheck')
+            manage_pkg('clean', 'remote', 'all')
+            manage_pkg('-y install', 'remote', 'zlib-devel libjpeg-turbo-devel --nogpgcheck')
             if os.environ['application'] in ('jupyter', 'zeppelin'):
                 sudo('pip2 install NumPy=={} SciPy pandas Sympy Pillow sklearn --no-cache-dir'.format(os.environ['notebook_numpy_version']))
                 sudo('python3.5 -m pip install NumPy=={} SciPy pandas Sympy Pillow sklearn --no-cache-dir'.format(os.environ['notebook_numpy_version']))
@@ -202,8 +203,8 @@
 def ensure_python3_specific_version(python3_version, os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/python3_specific_version_ensured'):
         try:
-            sudo('yum install -y yum-utils python34 openssl-devel')
-            sudo('yum -y groupinstall development --nogpgcheck')
+            manage_pkg('-y install', 'remote', 'yum-utils python34 openssl-devel')
+            manage_pkg('-y groupinstall', 'remote', 'development --nogpgcheck')
             if len(python3_version) < 4:
                 python3_version = python3_version + ".0"
             sudo('wget https://www.python.org/ftp/python/{0}/Python-{0}.tgz'.format(python3_version))
@@ -217,8 +218,8 @@
     if not exists('/home/' + os_user + '/.ensure_dir/python2_libraries_ensured'):
         try:
             sudo('pip2 install pyparsing==2.0.3')
-            sudo('yum install -y python-setuptools python-wheel')
-            sudo('yum install -y python-virtualenv openssl-devel python-devel openssl-libs libxslt-devel --nogpgcheck')
+            manage_pkg('-y install', 'remote', 'python-setuptools python-wheel')
+            manage_pkg('-y install', 'remote', 'python-virtualenv openssl-devel python-devel openssl-libs libxslt-devel --nogpgcheck')
             try:
                 sudo('python2 -m pip install backports.shutil_get_terminal_size tornado=={0} ipython ipykernel=={1} --no-cache-dir' \
                      .format(os.environ['notebook_tornado_version'], os.environ['notebook_ipykernel_version']))
@@ -239,8 +240,8 @@
 def ensure_python3_libraries(os_user):
     if not exists('/home/' + os_user + '/.ensure_dir/python3_libraries_ensured'):
         try:
-            sudo('yum -y install https://centos7.iuscommunity.org/ius-release.rpm')
-            sudo('yum install -y python35u python35u-pip python35u-devel')
+            manage_pkg('-y install', 'remote', 'https://centos7.iuscommunity.org/ius-release.rpm')
+            manage_pkg('-y install', 'remote', 'python35u python35u-pip python35u-devel')
             sudo('python3.5 -m pip install -U pip=={} setuptools --no-cache-dir'.format(os.environ['conf_pip_version']))
             sudo('python3.5 -m pip install boto3 --no-cache-dir')
             sudo('python3.5 -m pip install fabvenv fabric-virtualenv future --no-cache-dir')
@@ -266,7 +267,7 @@
             sudo('dracut --force')
             with settings(warn_only=True):
                 reboot(wait=150)
-            sudo('yum -y install libglvnd-opengl libglvnd-devel dkms gcc kernel-devel-$(uname -r) kernel-headers-$(uname -r)')
+            manage_pkg('-y install', 'remote', 'libglvnd-opengl libglvnd-devel dkms gcc kernel-devel-$(uname -r) kernel-headers-$(uname -r)')
             sudo('wget http://us.download.nvidia.com/XFree86/Linux-x86_64/{0}/NVIDIA-Linux-x86_64-{0}.run -O /home/{1}/NVIDIA-Linux-x86_64-{0}.run'.format(nvidia_version, os_user))
             sudo('/bin/bash /home/{0}/NVIDIA-Linux-x86_64-{1}.run -s --dkms'.format(os_user, nvidia_version))
             sudo('rm -f /home/{0}/NVIDIA-Linux-x86_64-{1}.run'.format(os_user, nvidia_version))
@@ -338,7 +339,7 @@
 def install_nodejs(os_user):
     if not exists('/home/{}/.ensure_dir/nodejs_ensured'.format(os_user)):
         sudo('curl -sL https://rpm.nodesource.com/setup_6.x | sudo -E bash -')
-        sudo('yum install -y nodejs')
+        manage_pkg('-y install', 'remote', 'nodejs')
         sudo('touch /home/{}/.ensure_dir/nodejs_ensured'.format(os_user))
 
 
@@ -347,10 +348,10 @@
     error_parser = "Could not|No matching|Error:|failed|Requires:|Errno"
     try:
         print("Updating repositories and installing requested tools: {}".format(requisites))
-        sudo('yum update-minimal --security -y --skip-broken')
+        manage_pkg('update-minimal --security -y --skip-broken', 'remote', '')
         sudo('export LC_ALL=C')
         for os_pkg in requisites:
-            sudo('yum -y install {0} --nogpgcheck 2>&1 | if ! grep -w -E  "({1})" >  /tmp/os_install_{0}.log; then  echo "" > /tmp/os_install_{0}.log;fi'.format(os_pkg, error_parser))
+            manage_pkg('-y install', 'remote', '{0} --nogpgcheck 2>&1 | if ! grep -w -E  "({1})" >  /tmp/os_install_{0}.log; then  echo "" > /tmp/os_install_{0}.log;fi'.format(os_pkg, error_parser))
             err = sudo('cat /tmp/os_install_{}.log'.format(os_pkg)).replace('"', "'")
             try:
                 res = sudo('python -c "import os,sys,yum; yb = yum.YumBase(); pl = yb.doPackageLists(); print [pkg.vr for pkg in pl.installed if pkg.name == \'{0}\'][0]"'.format(os_pkg))
@@ -365,14 +366,14 @@
 
 def remove_os_pkg(pkgs):
     try:
-        sudo('yum remove -y {}'.format(' '.join(pkgs)))
+        manage_pkg('remove -y', 'remote', '{}'.format(' '.join(pkgs)))
     except:
         sys.exit(1)
 
 
 def get_available_os_pkgs():
     try:
-        sudo('yum update-minimal --security -y --skip-broken')
+        manage_pkg('update-minimal --security -y --skip-broken', 'remote', '')
         downgrade_python_version()
         yum_raw = sudo('python -c "import os,sys,yum; yb = yum.YumBase(); pl = yb.doPackageLists(); print {pkg.name:pkg.vr for pkg in pl.available}"')
         yum_re = re.sub\
@@ -387,7 +388,7 @@
 
 def install_opencv(os_user):
     if not exists('/home/{}/.ensure_dir/opencv_ensured'.format(os_user)):
-        sudo('yum install -y cmake python34 python34-devel python34-pip gcc gcc-c++')
+        manage_pkg('-y install', 'remote', 'cmake python34 python34-devel python34-pip gcc gcc-c++')
         sudo('pip2 install numpy=={} --no-cache-dir'.format(os.environ['notebook_numpy_version']))
         sudo('pip3.4 install numpy=={} --no-cache-dir'.format(os.environ['notebook_numpy_version']))
         sudo('pip3.5 install numpy=={} --no-cache-dir'.format(os.environ['notebook_numpy_version']))
@@ -405,8 +406,8 @@
 def install_caffe2(os_user, caffe2_version, cmake_version):
     if not exists('/home/{}/.ensure_dir/caffe2_ensured'.format(os_user)):
         env.shell = "/bin/bash -l -c -i"
-        sudo('yum update-minimal --security -y')
-        sudo('yum install -y --nogpgcheck automake cmake3 gcc gcc-c++ kernel-devel leveldb-devel lmdb-devel libtool protobuf-devel graphviz')
+        manage_pkg('update-minimal --security -y', 'remote', '')
+        manage_pkg('-y install --nogpgcheck', 'remote', 'automake cmake3 gcc gcc-c++ kernel-devel leveldb-devel lmdb-devel libtool protobuf-devel graphviz')
         sudo('pip2 install flask graphviz hypothesis jupyter matplotlib==2.0.2 numpy=={} protobuf pydot python-nvd3 pyyaml '
              'requests scikit-image scipy setuptools tornado future --no-cache-dir'.format(os.environ['notebook_numpy_version']))
         sudo('pip3.5 install flask graphviz hypothesis jupyter matplotlib==2.0.2 numpy=={} protobuf pydot python-nvd3 pyyaml '
@@ -432,8 +433,9 @@
 def install_cntk(os_user, cntk_version):
     if not exists('/home/{}/.ensure_dir/cntk_ensured'.format(os_user)):
         sudo('echo "exclude=*.i386 *.i686" >> /etc/yum.conf')
-        sudo('yum clean all && yum update-minimal --security -y')
-        sudo('yum install -y openmpi openmpi-devel --nogpgcheck')
+        manage_pkg('clean', 'remote', 'all')
+        manage_pkg('update-minimal --security -y', 'remote', '')
+        manage_pkg('-y install --nogpgcheck', 'remote', 'openmpi openmpi-devel')
         sudo('pip2 install https://cntk.ai/PythonWheel/GPU/cntk-{}-cp27-cp27mu-linux_x86_64.whl --no-cache-dir'.format(cntk_version))
         sudo('pip3.5 install https://cntk.ai/PythonWheel/GPU/cntk-{}-cp35-cp35m-linux_x86_64.whl --no-cache-dir'.format(cntk_version))
         sudo('touch /home/{}/.ensure_dir/cntk_ensured'.format(os_user))
@@ -464,9 +466,7 @@
     if not exists('/home/{}/.ensure_dir/torch_ensured'.format(os_user)):
         run('git clone https://github.com/torch/distro.git ~/torch --recursive')
         with cd('/home/{}/torch/'.format(os_user)):
-            sudo('yum install -y --nogpgcheck cmake curl readline-devel ncurses-devel gcc-c++ gcc-gfortran git '
-                 'gnuplot unzip libjpeg-turbo-devel libpng-devel ImageMagick GraphicsMagick-devel fftw-devel '
-                 'sox-devel sox zeromq3-devel qt-devel qtwebkit-devel sox-plugins-freeworld qt-devel')
+            manage_pkg('-y install --nogpgcheck', 'remote', 'cmake curl readline-devel ncurses-devel gcc-c++ gcc-gfortran git gnuplot unzip libjpeg-turbo-devel libpng-devel ImageMagick GraphicsMagick-devel fftw-devel sox-devel sox zeromq3-devel qt-devel qtwebkit-devel sox-plugins-freeworld qt-devel')
             run('./install.sh -b')
         run('source /home/{}/.bashrc'.format(os_user))
         sudo('touch /home/{}/.ensure_dir/torch_ensured'.format(os_user))
diff --git a/infrastructure-provisioning/src/general/lib/os/redhat/ssn_lib.py b/infrastructure-provisioning/src/general/lib/os/redhat/ssn_lib.py
index a3dfd07..83fd2ca 100644
--- a/infrastructure-provisioning/src/general/lib/os/redhat/ssn_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/redhat/ssn_lib.py
@@ -30,6 +30,7 @@
 import json
 import sys
 import traceback
+from dlab.common_lib import manage_pkg
 
 
 def ensure_docker_daemon(dlab_path, os_user, region):
@@ -48,9 +49,9 @@
                 sudo('echo "gpgcheck=1" >> centos.repo')
                 sudo('echo "gpgkey=http://{}/centos/7/os/x86_64/RPM-GPG-KEY-CentOS-7" >> centos.repo'.format(mirror))
             sudo('yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo')
-            sudo('yum update-minimal --security -y')
-            sudo('yum install container-selinux -y')
-            sudo('yum install docker-ce-{}.ce -y'.format(docker_version))
+            manage_pkg('update-minimal --security -y', 'remote', '')
+            manage_pkg('-y install', 'remote', 'container-selinux')
+            manage_pkg('-y install', 'remote', 'docker-ce-{}.ce'.format(docker_version))
             sudo('usermod -aG docker {}'.format(os_user))
             sudo('systemctl enable docker.service')
             sudo('systemctl start docker')
@@ -63,7 +64,7 @@
 def ensure_nginx(dlab_path):
     try:
         if not exists('{}tmp/nginx_ensured'.format(dlab_path)):
-            sudo('yum -y install nginx')
+            manage_pkg('-y install', 'remote', 'nginx')
             sudo('systemctl restart nginx.service')
             sudo('chkconfig nginx on')
             sudo('touch {}tmp/nginx_ensured'.format(dlab_path))
@@ -81,9 +82,8 @@
                 sudo('rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key')
             except:
                 pass
-            sudo('yum -y install java-1.8.0-openjdk-devel')
-            sudo('yum -y install jenkins')
-            sudo('yum -y install policycoreutils-python')
+            manage_pkg('-y install', 'remote', 'jenkins')
+            manage_pkg('-y install', 'remote', 'policycoreutils-python')
             sudo('touch {}tmp/jenkins_ensured'.format(dlab_path))
     except Exception as err:
         traceback.print_exc()
@@ -161,7 +161,7 @@
 def ensure_supervisor():
     try:
         if not exists('{}tmp/superv_ensured'.format(os.environ['ssn_dlab_path'])):
-            sudo('yum install -y supervisor')
+            manage_pkg('-y install', 'remote', 'supervisor')
             #sudo('pip install supervisor')
             sudo('chkconfig supervisord on')
             sudo('systemctl start supervisord')
@@ -181,7 +181,7 @@
                  '\nenabled=1'
                  '\ngpgkey=https://www.mongodb.org/static/pgp/server-3.2.asc" '
                  '> /etc/yum.repos.d/mongodb.repo')
-            sudo('yum install -y mongodb-org')
+            manage_pkg('-y install', 'remote', 'mongodb-org')
             sudo('semanage port -a -t mongod_port_t -p tcp 27017')
             sudo('chkconfig mongod on')
             sudo('echo "d /var/run/mongodb 0755 mongod mongod" > /lib/tmpfiles.d/mongodb.conf')
@@ -255,6 +255,8 @@
                         item['key'], item['value']))
                 sudo('sed -i "s|SERVICE_BASE_NAME|{0}|g" /tmp/yml_tmp/self-service.yml'.format(service_base_name))
                 sudo('sed -i "s|OPERATION_SYSTEM|redhat|g" /tmp/yml_tmp/self-service.yml')
+                sudo('sed -i "s|<SSN_INSTANCE_SIZE>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(
+                    os.environ['{0}_ssn_instance_size'.format(os.environ['conf_cloud_provider'])]))
                 if os.environ['conf_cloud_provider'] == 'azure':
                     sudo('sed -i "s|<LOGIN_USE_LDAP>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(ldap_login))
                     sudo('sed -i "s|<LOGIN_TENANT_ID>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(tenant_id))
@@ -333,13 +335,27 @@
                 sudo('python /tmp/configure_billing.py {}'.format(params))
 
             try:
-                sudo('keytool -genkeypair -alias dlab -keyalg RSA -validity 730 -storepass {1} -keypass {1} \
-                     -keystore /home/{0}/keys/dlab.keystore.jks -keysize 2048 -dname "CN=localhost"'.format(
-                    os_user, keystore_passwd))
-                sudo('keytool -exportcert -alias dlab -storepass {1} -file /home/{0}/keys/dlab.crt \
-                     -keystore /home/{0}/keys/dlab.keystore.jks'.format(os_user, keystore_passwd))
-                sudo('keytool -importcert -trustcacerts -alias dlab -file /home/{0}/keys/dlab.crt -noprompt \
-                     -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_path))
+                if os.environ['conf_stepcerts_enabled'] == 'true':
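+                    # With step certificates enabled, wrap the issued key pair
+                    # in a PKCS12 bundle, import it into the ssn JKS keystore,
+                    # and trust both the step root CA and the SSN certificate
+                    # in the JVM cacerts store.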
+                    sudo('openssl pkcs12 -export -in /etc/ssl/certs/dlab.crt -inkey /etc/ssl/certs/dlab.key -name ssn '
+                         '-out ssn.p12 -password pass:{0}'.format(keystore_passwd))
+                    sudo('keytool -importkeystore -srckeystore ssn.p12 -srcstoretype PKCS12 -alias ssn -destkeystore '
+                         '/home/{0}/keys/ssn.keystore.jks -deststorepass "{1}" -srcstorepass "{1}"'.format(
+                        os_user, keystore_passwd))
+                    sudo('keytool -keystore /home/{0}/keys/ssn.keystore.jks -alias step-ca -import -file '
+                         '/etc/ssl/certs/root_ca.crt  -deststorepass "{1}" -srcstorepass "{1}" -noprompt'.format(
+                        os_user, keystore_passwd))
+                    sudo('keytool -importcert -trustcacerts -alias step-ca -file /etc/ssl/certs/root_ca.crt '
+                         '-noprompt -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_path))
+                    sudo('keytool -importcert -trustcacerts -alias ssn -file /etc/ssl/certs/dlab.crt -noprompt '
+                         '-storepass changeit -keystore {0}/lib/security/cacerts'.format(java_path))
+                else:
+                    sudo('keytool -genkeypair -alias ssn -keyalg RSA -validity 730 -storepass {1} -keypass {1} \
+                         -keystore /home/{0}/keys/ssn.keystore.jks -keysize 2048 -dname "CN=localhost"'.format(
+                        os_user, keystore_passwd))
+                    sudo('keytool -exportcert -alias ssn -storepass {1} -file /etc/ssl/certs/dlab.crt \
+                         -keystore /home/{0}/keys/ssn.keystore.jks'.format(os_user, keystore_passwd))
+                    sudo('keytool -importcert -trustcacerts -alias ssn -file /etc/ssl/certs/dlab.crt -noprompt \
+                         -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_path))
             except:
                 append_result("Unable to generate cert and copy to java keystore")
                 sys.exit(1)
@@ -356,14 +372,14 @@
     try:
         if not exists('{}tmp/build_dep_ensured'.format(os.environ['ssn_dlab_path'])):
             maven_version = '3.5.4'
-            sudo('yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel git wget unzip')
+            manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk java-1.8.0-openjdk-devel git wget unzip')
             with cd('/opt/'):
                 sudo('wget http://mirrors.sonic.net/apache/maven/maven-{0}/{1}/binaries/apache-maven-{1}-bin.zip'.format(
                     maven_version.split('.')[0], maven_version))
                 sudo('unzip apache-maven-{}-bin.zip'.format(maven_version))
                 sudo('mv apache-maven-{} maven'.format(maven_version))
             sudo('bash -c "curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -"')
-            sudo('yum install -y nodejs')
+            manage_pkg('-y install', 'remote', 'nodejs')
             sudo('npm config set unsafe-perm=true')
             sudo('touch {}tmp/build_dep_ensured'.format(os.environ['ssn_dlab_path']))
     except Exception as err:
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py
index 9bce147..207af06 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py
@@ -30,21 +30,19 @@
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--bucket_name', type=str, default='')
-parser.add_argument('--bucket_name_tag', type=str, default='')
-parser.add_argument('--infra_tag_name', type=str, default='')
-parser.add_argument('--infra_tag_value', type=str, default='')
+parser.add_argument('--bucket_tags', type=str, default='')
 parser.add_argument('--region', type=str, default='')
+parser.add_argument('--bucket_name_tag', type=str, default='')
 args = parser.parse_args()
 
 
 if __name__ == "__main__":
-    tag = {"Key": args.infra_tag_name, "Value": args.infra_tag_value}
     if args.bucket_name != '':
         try:
             bucket = get_bucket_by_name(args.bucket_name)
             if bucket == '':
-                print("Creating bucket {0} with tag {1}.".format(args.bucket_name, json.dumps(tag)))
-                bucket = create_s3_bucket(args.bucket_name, tag, args.region, args.bucket_name_tag)
+                print("Creating bucket {0} with tags {1}.".format(args.bucket_name, args.bucket_tags))
+                bucket = create_s3_bucket(args.bucket_name, args.bucket_tags, args.region, args.bucket_name_tag)
             else:
                 print("REQUESTED BUCKET ALREADY EXISTS")
             print("BUCKET_NAME {}".format(bucket))
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
index 1e9ef5c..1d5cb04 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
@@ -21,30 +21,33 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
 import json
 import uuid
+import os
 
 
 if __name__ == "__main__":
     try:
         image_conf = dict()
-        create_aws_config_files()
-        image_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+        dlab.actions_lib.create_aws_config_files()
+        image_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         image_conf['project_name'] = os.environ['project_name']
         image_conf['project_tag'] = os.environ['project_name']
+        image_conf['endpoint_name'] = os.environ['endpoint_name']
         image_conf['instance_name'] = os.environ['notebook_instance_name']
-        image_conf['instance_tag'] = '{}-Tag'.format(image_conf['service_base_name'])
+        image_conf['instance_tag'] = '{}-tag'.format(image_conf['service_base_name'])
         image_conf['application'] = os.environ['application']
-        image_conf['image_name'] = os.environ['notebook_image_name'].lower().replace('_', '-')
-        image_conf['full_image_name'] = '{}-{}-{}-{}'.format(image_conf['service_base_name'],
-                                                             image_conf['project_name'],
-                                                             image_conf['application'],
-                                                             image_conf['image_name']).lower()
+        image_conf['image_name'] = os.environ['notebook_image_name']
+        image_conf['full_image_name'] = '{}-{}-{}-{}-{}'.format(image_conf['service_base_name'],
+                                                                image_conf['project_name'],
+                                                                image_conf['endpoint_name'],
+                                                                image_conf['application'],
+                                                                image_conf['image_name'])
         image_conf['tags'] = {"Name": image_conf['full_image_name'],
                               "SBN": image_conf['service_base_name'],
                               "Project": image_conf['project_name'],
@@ -52,16 +55,19 @@
                               "FIN": image_conf['full_image_name'],
                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
 
-        ami_id = get_ami_id_by_name(image_conf['full_image_name'])
+        ami_id = dlab.meta_lib.get_ami_id_by_name(image_conf['full_image_name'])
         if ami_id == '':
             try:
-                os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+                os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
+                                                     ';project_tag:{0};endpoint_tag:{1};'.format(
+                                                         os.environ['project_name'], os.environ['endpoint_name'])
             except KeyError:
-                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
-            image_id = create_image_from_instance(tag_name=image_conf['instance_tag'],
-                                                  instance_name=image_conf['instance_name'],
-                                                  image_name=image_conf['full_image_name'],
-                                                  tags=json.dumps(image_conf['tags']))
+                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+                    os.environ['project_name'], os.environ['endpoint_name'])
+            image_id = dlab.actions_lib.create_image_from_instance(tag_name=image_conf['instance_tag'],
+                                                                   instance_name=image_conf['instance_name'],
+                                                                   image_name=image_conf['full_image_name'],
+                                                                   tags=json.dumps(image_conf['tags']))
             print("Image was successfully created. It's name is {}".format(image_conf['full_image_name']))
 
             with open("/root/result.json", 'w') as result:
@@ -73,5 +79,5 @@
                        "Action": "Create image from notebook"}
                 result.write(json.dumps(res))
     except Exception as err:
-        append_result("Failed to create image from notebook", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create image from notebook", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
index 3cad721..8b7f038 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
@@ -29,13 +29,13 @@
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--bucket_name', type=str, default='')
-parser.add_argument('--ssn_bucket_name', type=str, default='')
 parser.add_argument('--shared_bucket_name', type=str, default='')
 parser.add_argument('--service_base_name', type=str, default='')
 parser.add_argument('--username', type=str, default='')
 parser.add_argument('--edge_role_name', type=str, default='')
 parser.add_argument('--notebook_role_name', type=str, default='')
 parser.add_argument('--region', type=str, default='')
+parser.add_argument('--endpoint_name', type=str, default='')
 parser.add_argument('--user_predefined_s3_policies', type=str, default='')
 args = parser.parse_args()
 
@@ -46,8 +46,6 @@
             handler = open('/root/templates/edge_s3_policy.json', 'r')
             policy = handler.read()
             policy = policy.replace('BUCKET_NAME', args.bucket_name)
-            # Removed for multiple Endpoints per project
-            # policy = policy.replace('SSN_BUCK', args.ssn_bucket_name)
             policy = policy.replace('SHARED_BUCK', args.shared_bucket_name)
             if args.region == 'cn-north-1':
                 policy = policy.replace('aws', 'aws-cn')
@@ -67,18 +65,19 @@
                     for i in list:
                         if i.get('PolicyName') in list_predefined_policies:
                             list_policies_arn.append(i.get('Arn'))
-                response = iam.create_policy(PolicyName='{}-{}-strict_to_S3-Policy'.
-                                             format(args.service_base_name, args.username), PolicyDocument=policy)
+                response = iam.create_policy(PolicyName='{}-{}-{}-strict_to_S3-Policy'.
+                                             format(args.service_base_name, args.username, args.endpoint_name),
+                                             PolicyDocument=policy)
                 time.sleep(10)
                 list_policies_arn.append(response.get('Policy').get('Arn'))
             except botocore.exceptions.ClientError as cle:
                 if cle.response['Error']['Code'] == 'EntityAlreadyExists':
-                    print("Policy {}-{}-strict_to_S3-Policy already exists. Reusing it.".
-                          format(args.service_base_name, args.username))
+                    print("Policy {}-{}-{}-strict_to_S3-Policy already exists. Reusing it.".
+                          format(args.service_base_name, args.username, args.endpoint_name))
                     list = iam.list_policies().get('Policies')
                     for i in list:
-                        if '{}-{}-strict_to_S3-Policy'.format(
-                                args.service_base_name, args.username) == i.get('PolicyName') or (
+                        if '{}-{}-{}-strict_to_S3-Policy'.format(
+                                args.service_base_name, args.username, args.endpoint_name) == i.get('PolicyName') or (
                                 args.user_predefined_s3_policies != 'None' and i.get('PolicyName') in
                                 list_predefined_policies):
                             list_policies_arn.append(i.get('Arn'))
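
The policy creation above follows a create-or-reuse pattern: attempt
iam.create_policy(), and on EntityAlreadyExists look the ARN up from the
existing policies instead of failing. A standalone sketch of that pattern,
assuming a hypothetical policy name and a minimal valid policy document:

    import json
    import time
    import boto3
    import botocore.exceptions

    iam = boto3.client('iam')
    policy_name = 'sbn-user-ep-strict_to_S3-Policy'  # hypothetical name
    policy_document = json.dumps({  # hypothetical minimal document
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "s3:ListAllMyBuckets",
                       "Resource": "*"}]})
    try:
        response = iam.create_policy(PolicyName=policy_name,
                                     PolicyDocument=policy_document)
        time.sleep(10)  # give IAM time to propagate, as the script does
        policy_arn = response['Policy']['Arn']
    except botocore.exceptions.ClientError as cle:
        if cle.response['Error']['Code'] == 'EntityAlreadyExists':
            # Reuse the existing policy; like the script, this only scans the
            # first page returned by list_policies().
            policy_arn = next(p['Arn'] for p in iam.list_policies()['Policies']
                              if p['PolicyName'] == policy_name)
        else:
            raise
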
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
index 5469e2a..b4dc3c6 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
@@ -132,7 +132,7 @@
             print("Associating route_table with the subnet")
             ec2 = boto3.resource('ec2')
             if os.environ['conf_duo_vpc_enable'] == 'true':
-                rt = get_route_table_by_tag(args.infra_tag_value + '-secondary-Tag', args.infra_tag_value)
+                rt = get_route_table_by_tag(args.infra_tag_value + '-secondary-tag', args.infra_tag_value)
             else:
                 rt = get_route_table_by_tag(args.infra_tag_name, args.infra_tag_value)
             route_table = ec2.RouteTable(rt)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py b/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
index bd7d266..8051e6d 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
@@ -43,7 +43,7 @@
     env.host_string = env.user + "@" + env.hosts
 
     service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower()[:12], '-', True)
+        os.environ['conf_service_base_name'][:20], '-', True)
     project_name = os.environ['project_name']
     endpoint_name = os.environ['endpoint_name']
     bucket_name = ('{0}-{1}-{2}-bucket'.format(service_base_name,
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
index 6fe139b..1d0df4f 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
@@ -24,11 +24,21 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
+
+
+def clear_resources():
+    emr_id = dlab.meta_lib.get_emr_id_by_name(notebook_config['cluster_name'])
+    dlab.actions_lib.terminate_emr(emr_id)
+    dlab.actions_lib.remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'],
+                                    os.environ['notebook_instance_name'], os.environ['conf_os_user'],
+                                    notebook_config['key_path'], os.environ['emr_version'])
 
 
 if __name__ == "__main__":
@@ -38,38 +48,45 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    # generating variables dictionary
-    create_aws_config_files()
-    print('Generating infrastructure names and tags')
-    notebook_config = dict()
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
-    notebook_config['project_name'] = os.environ['project_name']
-    notebook_config['endpoint_name'] = os.environ['endpoint_name']
-    notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
-                                                                  notebook_config['project_name'],
-                                                                  notebook_config['endpoint_name'])).lower().replace('_', '-')
-    notebook_config['cluster_name'] = get_not_configured_emr(notebook_config['tag_name'],
-                                                             notebook_config['notebook_name'], True)
-    notebook_config['notebook_ip'] = get_instance_ip_address(notebook_config['tag_name'],
-                                                             notebook_config['notebook_name']).get('Private')
-    notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
-    notebook_config['cluster_id'] = get_emr_id_by_name(notebook_config['cluster_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    if os.environ['application'] == 'deeplearning':
-        application = 'jupyter'
-    else:
-        application = os.environ['application']
+    try:
+        # generating variables dictionary
+        dlab.actions_lib.create_aws_config_files()
+        print('Generating infrastructure names and tags')
+        notebook_config = dict()
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+                os.environ['conf_service_base_name'][:20], '-', True)
+        notebook_config['notebook_name'] = os.environ['notebook_instance_name']
+        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'],
+            notebook_config['endpoint_name']
+        ).lower().replace('_', '-')
+        notebook_config['cluster_name'] = dlab.meta_lib.get_not_configured_emr(notebook_config['tag_name'],
+                                                                               notebook_config['notebook_name'], True)
+        notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
+        notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+        notebook_config['cluster_id'] = dlab.meta_lib.get_emr_id_by_name(notebook_config['cluster_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    os.environ['project_name'], os.environ['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        if os.environ['application'] == 'deeplearning':
+            application = 'jupyter'
+        else:
+            application = os.environ['application']
+    except Exception as err:
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
 
     try:
         logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
         print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
-        params = "--bucket {} --cluster_name {} --emr_version {} --keyfile {} --notebook_ip {} --region {} --emr_excluded_spark_properties {} --project_name {} --os_user {}  --edge_hostname {} --proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
+        params = "--bucket {} --cluster_name {} --emr_version {} --keyfile {} --notebook_ip {} --region {} " \
+                 "--emr_excluded_spark_properties {} --project_name {} --os_user {}  --edge_hostname {} " \
+                 "--proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
             .format(notebook_config['bucket_name'], notebook_config['cluster_name'], os.environ['emr_version'],
                     notebook_config['key_path'], notebook_config['notebook_ip'], os.environ['aws_region'],
                     os.environ['emr_excluded_spark_properties'], os.environ['project_name'],
@@ -77,17 +94,15 @@
                     os.environ['application'], os.environ['conf_pypi_mirror'])
         try:
             local("~/scripts/{}_{}.py {}".format(application, 'install_dataengine-service_kernels', params))
-            remove_emr_tag(notebook_config['cluster_id'], ['State'])
-            tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'], os.environ['conf_tag_resource_id'])
+            dlab.actions_lib.remove_emr_tag(notebook_config['cluster_id'], ['State'])
+            dlab.actions_lib.tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'],
+                                            os.environ['conf_tag_resource_id'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed installing EMR kernels.", str(err))
-        emr_id = get_emr_id_by_name(notebook_config['cluster_name'])
-        terminate_emr(emr_id)
-        remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'], os.environ['notebook_instance_name'],
-                       os.environ['conf_os_user'], notebook_config['key_path'], os.environ['emr_version'])
+        dlab.fab.append_result("Failed installing EMR kernels.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -101,17 +116,15 @@
                     os.environ['conf_os_user'])
         try:
             local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
-            remove_emr_tag(notebook_config['cluster_id'], ['State'])
-            tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'], os.environ['conf_tag_resource_id'])
+            dlab.actions_lib.remove_emr_tag(notebook_config['cluster_id'], ['State'])
+            dlab.actions_lib.tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'],
+                                            os.environ['conf_tag_resource_id'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed to configure Spark.", str(err))
-        emr_id = get_emr_id_by_name(notebook_config['cluster_name'])
-        terminate_emr(emr_id)
-        remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'], os.environ['notebook_instance_name'],
-                       os.environ['conf_os_user'], notebook_config['key_path'], os.environ['emr_version'])
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -121,6 +134,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
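
The substantive change in this script is deduplication: the EMR teardown that
was previously copy-pasted into every except branch now lives in a single
clear_resources() helper, so each failure path reduces to append_result(),
clear_resources(), sys.exit(1). The shape of that refactoring, sketched with
hypothetical stand-in actions:

    import sys

    def clear_resources():
        # One cleanup path; stands in for the terminate_emr()/remove_kernels()
        # calls in the script above.
        print('tearing down cluster resources')

    def run_step(failure_message, action):
        # Run one provisioning step; on any failure, clean up once and exit
        # non-zero instead of repeating the teardown inline.
        try:
            action()
        except Exception as err:
            print('{} {}'.format(failure_message, err))
            clear_resources()
            sys.exit(1)

    run_step('Failed installing EMR kernels.', lambda: None)  # hypothetical no-op step
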
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
index 0cd06be..c80328b 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
+
+
+def clear_resources():
+    dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
+    for i in range(notebook_config['instance_count'] - 1):
+        slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], slave_name)
 
 
 if __name__ == "__main__":
@@ -41,25 +50,27 @@
 
     try:
         # generating variables dictionary
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
         print('Generating infrastructure names and tags')
         notebook_config = dict()
-        try:
+        if 'exploratory_name' in os.environ:
             notebook_config['exploratory_name'] = os.environ['exploratory_name']
-        except:
+        else:
             notebook_config['exploratory_name'] = ''
-        try:
+        if 'computational_name' in os.environ:
             notebook_config['computational_name'] = os.environ['computational_name']
-        except:
+        else:
             notebook_config['computational_name'] = ''
-        notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         notebook_config['region'] = os.environ['aws_region']
-        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
         notebook_config['project_name'] = os.environ['project_name']
-        notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
-                                          '-de-' + notebook_config['exploratory_name'] + '-' + \
-                                          notebook_config['computational_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['cluster_name'] = "{}-{}-{}-de-{}".format(notebook_config['service_base_name'],
+                                                                  notebook_config['project_name'],
+                                                                  notebook_config['endpoint_name'],
+                                                                  notebook_config['computational_name'])
         notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
         notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -67,21 +78,18 @@
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
         try:
-            notebook_config['spark_master_ip'] = get_instance_private_ip_address(
+            notebook_config['spark_master_ip'] = dlab.meta_lib.get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['master_node_name'])
-            notebook_config['notebook_ip'] = get_instance_private_ip_address(
+            notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
         except Exception as err:
-            print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to get ip address", str(err))
             sys.exit(1)
         notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
 
     except Exception as err:
-        remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(notebook_config['tag_name'], slave_name)
-        append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
         sys.exit(1)
 
     try:
@@ -99,11 +107,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(notebook_config['tag_name'], slave_name)
-        append_result("Failed installing Dataengine kernels.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -123,11 +128,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(notebook_config['tag_name'], slave_name)
-        append_result("Failed to configure Spark.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
         sys.exit(1)
 
     try:
@@ -136,6 +138,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
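
A smaller cleanup in the same script replaces bare try/except around
os.environ lookups with an explicit membership test, which does not swallow
unrelated errors. The equivalent forms side by side:

    import os

    # Before: a bare except hides any error, not just a missing variable.
    try:
        exploratory_name = os.environ['exploratory_name']
    except:
        exploratory_name = ''

    # After: only the absent-variable case falls back to the default.
    if 'exploratory_name' in os.environ:
        exploratory_name = os.environ['exploratory_name']
    else:
        exploratory_name = ''

    # Equivalent one-liner, for reference:
    exploratory_name = os.environ.get('exploratory_name', '')
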
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
index de8b3b4..5c481ac 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import os
 import argparse
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -42,92 +44,107 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
+    try:
+        # generating variables dictionary
+        dlab.actions_lib.create_aws_config_files()
+        notebook_config = dict()
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['edge_name'] = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                              notebook_config['project_name'],
+                                                              notebook_config['endpoint_name'])
+        edge_status = dlab.meta_lib.get_instance_status(notebook_config['service_base_name'] + '-tag',
+                                                        notebook_config['edge_name'])
+        if edge_status != 'running':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            notebook_config['ssn_hostname'] = dlab.meta_lib.get_instance_hostname(
+                '{}-tag'.format(notebook_config['service_base_name']),
+                '{}-ssn'.format(notebook_config['service_base_name']))
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         notebook_config['ssn_hostname'])
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+        print('Generating infrastructure names and tags')
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        else:
+            notebook_config['exploratory_name'] = ''
 
-    # generating variables dictionary
-    create_aws_config_files()
-    notebook_config = dict()
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['edge_name'] = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                          os.environ['project_name'], os.environ['endpoint_name'])
-    edge_status = get_instance_status(notebook_config['service_base_name'] + '-Tag', notebook_config['edge_name'])
-    if edge_status != 'running':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = get_instance_hostname(notebook_config['service_base_name'] + '-Tag', notebook_config['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
-        append_result("Edge node is unavailable")
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(
+            os.environ['application'])
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+        notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}-{4}'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+            os.environ['application'], os.environ['notebook_image_name']) if (x != 'None' and x != '')
+            else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
+        print('Searching pre-configured images')
+        notebook_config['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+            os.environ['conf_os_family'])])
+        image_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['notebook_image_name'], 'available')
+        if image_id != '':
+            notebook_config['ami_id'] = image_id
+            print('Pre-configured image found. Using: {}'.format(notebook_config['ami_id']))
+        else:
+            os.environ['notebook_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+            print('No pre-configured image found. Using default one: {}'.format(notebook_config['ami_id']))
+
+        tag = {"Key": notebook_config['tag_name'],
+               "Value": "{}-{}-{}-subnet".format(notebook_config['service_base_name'], notebook_config['project_name'],
+                                                 notebook_config['endpoint_name'])}
+        notebook_config['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+
+        with open('/root/result.json', 'w') as f:
+            data = {"notebook_name": notebook_config['instance_name'], "error": ""}
+            json.dump(data, f)
+
+        try:
+            os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                notebook_config['project_name'], notebook_config['endpoint_name'], os.environ['conf_additional_tags'])
+        except KeyError:
+            os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+                notebook_config['project_name'], notebook_config['endpoint_name'])
+
+        print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
-    print('Generating infrastructure names and tags')
-    try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(os.environ['application'])
-    notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                       os.environ['endpoint_name'],
-                                                                                       os.environ['application'])
-    notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}'.format(notebook_config['service_base_name'],
-                                                                                 os.environ['project_name'],
-                                                                                 os.environ['application'],
-                                                                                 os.environ['notebook_image_name']).lower().replace('_', '-') if (x != 'None' and x != '')
-        else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
-    print('Searching pre-configured images')
-    notebook_config['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
-    image_id = get_ami_id_by_name(notebook_config['notebook_image_name'], 'available')
-    if image_id != '':
-        notebook_config['ami_id'] = image_id
-        print('Pre-configured image found. Using: {}'.format(notebook_config['ami_id']))
-    else:
-        os.environ['notebook_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
-        print('No pre-configured image found. Using default one: {}'.format(notebook_config['ami_id']))
-    
-    tag = {"Key": notebook_config['tag_name'],
-           "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
-    notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-
-    with open('/root/result.json', 'w') as f:
-        data = {"notebook_name": notebook_config['instance_name'], "error": ""}
-        json.dump(data, f)
-
-    try:
-        os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
-    except KeyError:
-        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
-
-    print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
-
 
     # launching instance for notebook server
     try:
         logging.info('[CREATE NOTEBOOK INSTANCE]')
         print('[CREATE NOTEBOOK INSTANCE]')
-        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --instance_class {} --instance_disk_size {} --primary_disk_size {}" \
-            .format(notebook_config['instance_name'], notebook_config['ami_id'], notebook_config['instance_type'],
-                    notebook_config['key_name'], get_security_group_by_name(notebook_config['security_group_name']),
-                    get_subnet_by_cidr(notebook_config['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
-                    notebook_config['role_profile_name'],
-                    notebook_config['tag_name'], notebook_config['instance_name'], instance_class,
-                    os.environ['notebook_disk_size'], notebook_config['primary_disk_size'])
+        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} " \
+                 "--iam_profile {} --infra_tag_name {} --infra_tag_value {} --instance_class {} " \
+                 "--instance_disk_size {} --primary_disk_size {}" .format(
+                  notebook_config['instance_name'], notebook_config['ami_id'], notebook_config['instance_type'],
+                  notebook_config['key_name'],
+                  dlab.meta_lib.get_security_group_by_name(notebook_config['security_group_name']),
+                  dlab.meta_lib.get_subnet_by_cidr(notebook_config['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
+                  notebook_config['role_profile_name'],
+                  notebook_config['tag_name'], notebook_config['instance_name'], instance_class,
+                  os.environ['notebook_disk_size'], notebook_config['primary_disk_size'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
 
@@ -135,6 +152,6 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed to create instance.", str(err))
+        dlab.fab.append_result("Failed to create instance.", str(err))
         sys.exit(1)
 
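
The conf_additional_tags handling above appends project and endpoint tags to
an existing semicolon-delimited tag string, or starts one from scratch when
the variable is unset (the KeyError branch). A runnable sketch with
hypothetical project and endpoint values:

    import os

    os.environ['project_name'] = 'proj1'   # hypothetical
    os.environ['endpoint_name'] = 'ep1'    # hypothetical

    try:
        os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
            os.environ['project_name'], os.environ['endpoint_name'],
            os.environ['conf_additional_tags'])
    except KeyError:
        # conf_additional_tags was not set at all.
        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
            os.environ['project_name'], os.environ['endpoint_name'])

    print(os.environ['conf_additional_tags'])  # project_tag:proj1;endpoint_tag:ep1
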
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
index 9a60aa2..d153082 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 import argparse
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,13 +42,12 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
 
     try:
         logging.info('[START NOTEBOOK]')
@@ -54,10 +55,10 @@
         params = "--tag_name {} --nb_tag_value {}".format(notebook_config['tag_name'], notebook_config['notebook_name'])
         try:
             print("Starting notebook")
-            start_ec2(notebook_config['tag_name'], notebook_config['notebook_name'])
+            dlab.actions_lib.start_ec2(notebook_config['tag_name'], notebook_config['notebook_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to start notebook.", str(err))
+            dlab.fab.append_result("Failed to start notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -65,8 +66,8 @@
     try:
         logging.info('[SETUP USER GIT CREDENTIALS]')
         print('[SETUP USER GIT CREDENTIALS]')
-        notebook_config['notebook_ip'] = get_instance_ip_address(notebook_config['tag_name'],
-                                                                 notebook_config['notebook_name']).get('Private')
+        notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
         notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
@@ -74,7 +75,7 @@
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to setup git credentials.", str(err))
+            dlab.fab.append_result("Failed to setup git credentials.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -88,15 +89,15 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
 
-
     try:
-        ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
-        dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['notebook_name'])
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['notebook_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['notebook_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(notebook_config['notebook_name']))
@@ -110,8 +111,8 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
 
 
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
index dc61a7a..679d4eb 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
@@ -24,13 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
+import traceback
 import os
 import uuid
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import boto3
 import argparse
 import sys
@@ -39,7 +39,7 @@
 def stop_notebook(nb_tag_value, bucket_name, tag_name, ssh_user, key_path):
     print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
     try:
-        clusters_list = get_emr_list(nb_tag_value, 'Value')
+        clusters_list = dlab.meta_lib.get_emr_list(nb_tag_value, 'Value')
         if clusters_list:
             for cluster_id in clusters_list:
                 computational_name = ''
@@ -51,11 +51,12 @@
                 for tag in cluster.get('Tags'):
                     if tag.get('Key') == 'ComputationalName':
                         computational_name = tag.get('Value')
-                s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+                dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
                 print("The bucket {} has been cleaned successfully".format(bucket_name))
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
-                remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version, computational_name)
+                dlab.actions_lib.remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version,
+                                                computational_name)
                 print("{} kernels have been removed from notebook successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
@@ -66,22 +67,22 @@
     try:
         cluster_list = []
         master_ids = []
-        cluster_instances_list = get_ec2_list('dataengine_notebook_name', nb_tag_value)
+        cluster_instances_list = dlab.meta_lib.get_ec2_list('dataengine_notebook_name', nb_tag_value)
         for instance in cluster_instances_list:
             for tag in instance.tags:
                 if tag['Key'] == 'Type' and tag['Value'] == 'master':
                     master_ids.append(instance.id)
         for id in master_ids:
-            for tag in get_instance_attr(id, 'tags'):
+            for tag in dlab.meta_lib.get_instance_attr(id, 'tags'):
                 if tag['Key'] == 'Name':
                     cluster_list.append(tag['Value'].replace(' ', '')[:-2])
-        stop_ec2('dataengine_notebook_name', nb_tag_value)
+        dlab.actions_lib.stop_ec2('dataengine_notebook_name', nb_tag_value)
     except:
         sys.exit(1)
 
     print("Stopping notebook")
     try:
-        stop_ec2(tag_name, nb_tag_value)
+        dlab.actions_lib.stop_ec2(tag_name, nb_tag_value)
     except:
         sys.exit(1)
 
@@ -95,18 +96,18 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['project_name'] = os.environ['project_name']
     notebook_config['endpoint_name'] = os.environ['endpoint_name']
-    notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
+    notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                   notebook_config['project_name'],
-                                                                  notebook_config['endpoint_name'])).lower().replace('_', '-')
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+                                                                  notebook_config['endpoint_name']
+                                                                 ).lower().replace('_', '-')
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
     notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
 
     logging.info('[STOP NOTEBOOK]')
@@ -116,7 +117,7 @@
                       os.environ['conf_os_user'], notebook_config['key_path'])
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop notebook.", str(err))
         sys.exit(1)
 
 
@@ -128,7 +129,7 @@
                    "Action": "Stop notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
 
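
The bucket name, unlike the instance tags, keeps its .lower().replace('_', '-')
normalization: S3 bucket names must be lowercase and cannot contain
underscores. A quick illustration with hypothetical inputs:

    def bucket_name(service_base_name, project, endpoint):
        # Compose and normalize the per-project bucket name, as the scripts do.
        return '{0}-{1}-{2}-bucket'.format(service_base_name, project,
                                           endpoint).lower().replace('_', '-')

    print(bucket_name('DLab_Test', 'Proj_1', 'ep1'))  # dlab-test-proj-1-ep1-bucket
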
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
index caeaf70..c199089 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
@@ -24,17 +24,19 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
+import boto3
 import uuid
 
 
 def terminate_nb(nb_tag_value, bucket_name, tag_name):
     print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
     try:
-        clusters_list = get_emr_list(nb_tag_value, 'Value')
+        clusters_list = dlab.meta_lib.get_emr_list(nb_tag_value, 'Value')
         if clusters_list:
             for cluster_id in clusters_list:
                 client = boto3.client('emr')
@@ -42,10 +44,10 @@
                 cluster = cluster.get("Cluster")
                 emr_name = cluster.get('Name')
                 print('Cleaning bucket from configs for cluster {}'.format(emr_name))
-                s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+                dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
                 print("The bucket {} has been cleaned successfully".format(bucket_name))
                 print('Terminating cluster {}'.format(emr_name))
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
@@ -54,13 +56,13 @@
 
     print("Terminating data engine cluster")
     try:
-        remove_ec2('dataengine_notebook_name', nb_tag_value)
+        dlab.actions_lib.remove_ec2('dataengine_notebook_name', nb_tag_value)
     except:
         sys.exit(1)
 
     print("Terminating notebook")
     try:
-        remove_ec2(tag_name, nb_tag_value)
+        dlab.actions_lib.remove_ec2(tag_name, nb_tag_value)
     except:
         sys.exit(1)
 
@@ -73,18 +75,18 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['project_name'] = os.environ['project_name']
     notebook_config['endpoint_name'] = os.environ['endpoint_name']
-    notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
+    notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                   notebook_config['project_name'],
-                                                                  notebook_config['endpoint_name'])).lower().replace('_', '-')
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+                                                                  notebook_config['endpoint_name']
+                                                                 ).lower().replace('_', '-')
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
 
     try:
         logging.info('[TERMINATE NOTEBOOK]')
@@ -93,7 +95,7 @@
             terminate_nb(notebook_config['notebook_name'], notebook_config['bucket_name'], notebook_config['tag_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate notebook.", str(err))
+            dlab.fab.append_result("Failed to terminate notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -106,6 +108,6 @@
                    "Action": "Terminate notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
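
terminate_nb() resolves every EMR cluster tied to the notebook, cleans its
configs out of the bucket, then terminates it. A sketch of the underlying
boto3 calls with a hypothetical cluster id (the script itself goes through the
dlab.actions_lib/dlab.meta_lib wrappers):

    import boto3

    client = boto3.client('emr')
    cluster_id = 'j-0ABCDEFGHIJKL'  # hypothetical cluster id
    cluster = client.describe_cluster(ClusterId=cluster_id)['Cluster']
    print('Terminating cluster {}'.format(cluster['Name']))
    # terminate_emr() in the script presumably maps onto this standard call:
    client.terminate_job_flows(JobFlowIds=[cluster_id])
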
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
index ce76a1e..3da4f63 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
@@ -21,22 +21,23 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
 import json
+import os
 
 
 if __name__ == "__main__":
     try:
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
         image_conf = dict()
         image_conf['full_image_name'] = os.environ['notebook_image_name']
 
-        image_id = get_ami_id_by_name(image_conf['full_image_name'], 'available')
+        image_id = dlab.meta_lib.get_ami_id_by_name(image_conf['full_image_name'], 'available')
         if image_id != '':
-            deregister_image(image_conf['full_image_name'])
+            dlab.actions_lib.deregister_image(image_conf['full_image_name'])
 
             with open("/root/result.json", 'w') as result:
                 res = {"notebook_image_name": image_conf['full_image_name'],
@@ -44,5 +45,5 @@
                        "Action": "Delete existing notebook image"}
                 result.write(json.dumps(res))
     except Exception as err:
-        append_result("Failed to delete existing notebook image", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to delete existing notebook image", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
index cb79b1a..9e9fb40 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import logging
@@ -53,9 +54,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create dlab ssh user.", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed to create dlab ssh user.", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
     # configuring proxy on Data Engine service
@@ -72,27 +72,27 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE DATAENGINE SERVICE]')
         print('[CONFIGURE DATAENGINE SERVICE]')
         try:
-            configure_data_engine_service_pip(emr_conf['instance_ip'], emr_conf['os_user'], emr_conf['key_path'])
+            dlab.fab.configure_data_engine_service_pip(emr_conf['instance_ip'], emr_conf['os_user'],
+                                                       emr_conf['key_path'])
             env['connection_attempts'] = 100
             env.key_filename = emr_conf['key_path']
             env.host_string = emr_conf['os_user'] + '@' + emr_conf['instance_ip']
-            sudo('echo "[main]" > /etc/yum/pluginconf.d/priorities.conf ; echo "enabled = 0" >> /etc/yum/pluginconf.d/priorities.conf')
+            sudo('echo "[main]" > /etc/yum/pluginconf.d/priorities.conf ; echo "enabled = 0" >> '
+                 '/etc/yum/pluginconf.d/priorities.conf')
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure dataengine service.", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed to configure dataengine service.", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
 
@@ -130,12 +130,11 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed edge reverse proxy template", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed edge reverse proxy template", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
     try:
@@ -150,9 +149,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed installing users key", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
 
@@ -163,75 +161,76 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.INFO,
                         filename=local_log_filepath)
+
     try:
-        os.environ['exploratory_name']
-    except:
-        os.environ['exploratory_name'] = ''
-    create_aws_config_files()
-    print('Generating infrastructure names and tags')
-    emr_conf = dict()
-    try:
-        emr_conf['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        emr_conf['exploratory_name'] = ''
-    try:
-        emr_conf['computational_name'] = os.environ['computational_name']
-    except:
-        emr_conf['computational_name'] = ''
-    emr_conf['apps'] = 'Hadoop Hive Hue Spark'
-    emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    emr_conf['project_name'] = os.environ['project_name']
-    emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    emr_conf['tag_name'] = emr_conf['service_base_name'] + '-Tag'
-    emr_conf['key_name'] = os.environ['conf_key_name']
-    emr_conf['region'] = os.environ['aws_region']
-    emr_conf['release_label'] = os.environ['emr_version']
-    emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
-    emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
-    emr_conf['instance_count'] = os.environ['emr_instance_count']
-    emr_conf['notebook_ip'] = get_instance_ip_address(emr_conf['tag_name'],
-                                                      os.environ['notebook_instance_name']).get('Private')
-    emr_conf['network_type'] = os.environ['conf_network_type']
-    emr_conf['role_service_name'] = os.environ['emr_service_role']
-    emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
-    emr_conf['tags'] = 'Name=' + emr_conf['service_base_name'] + '-' + os.environ['project_name'] + '-des-' + \
-                       emr_conf['exploratory_name'] + '-' + emr_conf['computational_name'] + '-' + args.uuid + \
-                       ', ' + emr_conf['service_base_name'] + '-Tag=' + emr_conf['service_base_name'] + '-' + \
-                       os.environ['project_name'] + '-des-' + emr_conf['exploratory_name'] + '-' + \
-                       emr_conf['computational_name'] + '-' + args.uuid + \
-                       ', Notebook=' + os.environ['notebook_instance_name'] + ', State=not-configured'
-    emr_conf['cluster_name'] = emr_conf['service_base_name'] + '-' + os.environ['project_name'] + '-des-' + \
-                               emr_conf['exploratory_name'] + '-' + emr_conf['computational_name'] + '-' + \
-                               args.uuid
-    emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
-                                                           emr_conf['endpoint_name'])).lower().replace('_', '-')
-    tag = {"Key": "{}-Tag".format(emr_conf['service_base_name']), "Value": "{}-{}-subnet".format(
-        emr_conf['service_base_name'], os.environ['project_name'])}
-    emr_conf['subnet_cidr'] = get_subnet_by_tag(tag)
-    emr_conf['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
-    emr_conf['all_ip_cidr'] = '0.0.0.0/0'
-    emr_conf['additional_emr_sg_name'] = '{}-{}-de-se-additional-sg'.format(emr_conf['service_base_name'],
-                                                                          os.environ['project_name'])
-    emr_conf['vpc_id'] = os.environ['aws_vpc_id']
-    emr_conf['cluster_id'] = get_emr_id_by_name(emr_conf['cluster_name'])
-    emr_conf['cluster_instances'] = get_emr_instances_list(emr_conf['cluster_id'])
-    emr_conf['cluster_master_instances'] = get_emr_instances_list(emr_conf['cluster_id'], 'MASTER')
-    emr_conf['cluster_core_instances'] = get_emr_instances_list(emr_conf['cluster_id'], 'CORE')
-    emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
-                                                               emr_conf['project_name'], emr_conf['endpoint_name'])
-    emr_conf['edge_instance_hostname'] = get_instance_private_ip_address(emr_conf['tag_name'],
-                                                                         emr_conf['edge_instance_name'])
-    if emr_conf['network_type'] == 'private':
-        emr_conf['edge_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'],
-                                                               emr_conf['edge_instance_name']).get('Private')
-    else:
-        emr_conf['edge_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'],
-                                                               emr_conf['edge_instance_name']).get('Public')
-    emr_conf['user_keyname'] = os.environ['project_name']
-    emr_conf['os_user'] = os.environ['conf_os_user']
-    emr_conf['initial_user'] = 'ec2-user'
-    emr_conf['sudo_group'] = 'wheel'
+        dlab.actions_lib.create_aws_config_files()
+        print('Generating infrastructure names and tags')
+        emr_conf = dict()
+        if 'exploratory_name' in os.environ:
+            emr_conf['exploratory_name'] = os.environ['exploratory_name']
+        else:
+            emr_conf['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            emr_conf['computational_name'] = os.environ['computational_name']
+        else:
+            emr_conf['computational_name'] = ''
+        emr_conf['apps'] = 'Hadoop Hive Hue Spark'
+        emr_conf['service_base_name'] = os.environ['conf_service_base_name']
+        emr_conf['project_name'] = os.environ['project_name']
+        emr_conf['endpoint_name'] = os.environ['endpoint_name']
+        emr_conf['tag_name'] = emr_conf['service_base_name'] + '-tag'
+        emr_conf['key_name'] = os.environ['conf_key_name']
+        emr_conf['region'] = os.environ['aws_region']
+        emr_conf['release_label'] = os.environ['emr_version']
+        emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
+        emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
+        emr_conf['instance_count'] = os.environ['emr_instance_count']
+        emr_conf['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
+        emr_conf['network_type'] = os.environ['conf_network_type']
+        emr_conf['role_service_name'] = os.environ['emr_service_role']
+        emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
+        emr_conf['tags'] = "Name={0}-{1}-{2}-des-{3}-{4}," \
+                           "{0}-tag={0}-{1}-{2}-des-{3}-{4}," \
+                           "Notebook={5}," \
+                           "State=not-configured," \
+                           "Endpoint_tag={2}".format(
+            emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'],
+            emr_conf['exploratory_name'], args.uuid, os.environ['notebook_instance_name'])
+        emr_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}-{4}' \
+            .format(emr_conf['service_base_name'],
+                    emr_conf['project_name'],
+                    emr_conf['endpoint_name'],
+                    emr_conf['computational_name'],
+                    args.uuid)
+        emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                               emr_conf['endpoint_name']).lower().replace('_', '-')
+        tag = {"Key": "{}-tag".format(emr_conf['service_base_name']), "Value": "{}-{}-{}-subnet".format(
+            emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'])}
+        emr_conf['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        emr_conf['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'],
+                                                  os.environ['conf_key_name'])
+        emr_conf['all_ip_cidr'] = '0.0.0.0/0'
+        emr_conf['additional_emr_sg_name'] = '{}-{}-{}-de-se-additional-sg'.format(emr_conf['service_base_name'],
+                                                                                   emr_conf['project_name'],
+                                                                                   emr_conf['endpoint_name'])
+        emr_conf['vpc_id'] = os.environ['aws_vpc_id']
+        emr_conf['cluster_id'] = dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])
+        emr_conf['cluster_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'])
+        emr_conf['cluster_master_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'MASTER')
+        emr_conf['cluster_core_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'CORE')
+        emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
+                                                                   emr_conf['project_name'], emr_conf['endpoint_name'])
+        emr_conf['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(emr_conf['tag_name'],
+                                                                                 emr_conf['edge_instance_name'])
+        emr_conf['user_keyname'] = emr_conf['project_name']
+        emr_conf['os_user'] = os.environ['conf_os_user']
+        emr_conf['initial_user'] = 'ec2-user'
+        emr_conf['sudo_group'] = 'wheel'
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
+        sys.exit(1)
 
     try:
         jobs = []
@@ -252,14 +253,14 @@
         logging.info('[SUMMARY]')
         ip_address = emr_conf['cluster_master_instances'][0].get('PrivateIpAddress')
         emr_master_url = "http://" + ip_address + ":8088"
-        emr_master_acces_url = "http://" + emr_conf['edge_instance_ip'] + "/{}/".format(emr_conf['exploratory_name'] +
-                                                                                        '_' +
-                                                                                        emr_conf['computational_name'])
+        emr_master_acces_url = "https://{}/{}_{}/".format(emr_conf['edge_instance_hostname'],
+                                                          emr_conf['exploratory_name'],
+                                                          emr_conf['computational_name'])
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
         print("Service base name: {}".format(emr_conf['service_base_name']))
         print("Cluster name: {}".format(emr_conf['cluster_name']))
-        print("Cluster id: {}".format(get_emr_id_by_name(emr_conf['cluster_name'])))
+        print("Cluster id: {}".format(dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])))
         print("Key name: {}".format(emr_conf['key_name']))
         print("Region: {}".format(emr_conf['region']))
         print("EMR version: {}".format(emr_conf['release_label']))
@@ -270,7 +271,7 @@
         print("Bucket name: {}".format(emr_conf['bucket_name']))
         with open("/root/result.json", 'w') as result:
             res = {"hostname": emr_conf['cluster_name'],
-                   "instance_id": get_emr_id_by_name(emr_conf['cluster_name']),
+                   "instance_id": dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name']),
                    "key_name": emr_conf['key_name'],
                    "user_own_bucket_name": emr_conf['bucket_name'],
                    "Action": "Create new EMR cluster",
@@ -282,8 +283,7 @@
                    ]}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
-    sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
+        sys.exit(1)
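
Two patterns above recur throughout the commit: the whole variables-dictionary block now sits in a single try/except that terminates the cluster on failure, and the old try/except probes for optional environment variables become explicit membership tests. A sketch of the lookup change, assuming nothing beyond os.environ:

import os

# Old: a bare except catching the KeyError (and anything else).
# try:
#     exploratory_name = os.environ['exploratory_name']
# except:
#     exploratory_name = ''

# New, as in this commit: explicit membership test.
if 'exploratory_name' in os.environ:
    exploratory_name = os.environ['exploratory_name']
else:
    exploratory_name = ''

# Equivalent idiom, should the codebase want it shorter:
exploratory_name = os.environ.get('exploratory_name', '')
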
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_create.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_create.py
index 8a61270..6017968 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_create.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_create.py
@@ -339,7 +339,7 @@
                     ReleaseLabel=args.release_label,
                     Instances={'Ec2KeyName': args.ssh_key,
                                'KeepJobFlowAliveWhenNoSteps': not args.auto_terminate,
-                               'Ec2SubnetId': get_subnet_by_cidr(args.subnet),
+                               'Ec2SubnetId': get_subnet_by_cidr(args.subnet, os.environ['aws_notebook_vpc_id']),
                                'InstanceGroups': [
                                    {'Market': 'SPOT',
                                     'BidPrice': args.bid_price[:5],
@@ -377,7 +377,7 @@
                                'Ec2KeyName': args.ssh_key,
                                # 'Placement': {'AvailabilityZone': args.availability_zone},
                                'KeepJobFlowAliveWhenNoSteps': not args.auto_terminate,
-                               'Ec2SubnetId': get_subnet_by_cidr(args.subnet),
+                               'Ec2SubnetId': get_subnet_by_cidr(args.subnet, os.environ['aws_notebook_vpc_id']),
                                'AdditionalMasterSecurityGroups': [
                                    get_security_group_by_name(
                                        args.additional_emr_sg)
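
get_subnet_by_cidr now also receives the notebook VPC id, so a CIDR that exists in more than one VPC resolves to the subnet in the intended VPC. The real helper lives in dlab/meta_lib.py and is not shown in this diff; a hedged boto3 sketch of what such a lookup can look like:

import boto3

def get_subnet_by_cidr(cidr, vpc_id=''):
    # Sketch only; the repo's implementation may differ in details.
    ec2 = boto3.resource('ec2')
    filters = [{'Name': 'cidrBlock', 'Values': [cidr]}]
    if vpc_id:
        # Scoping by vpc-id is the point of the change: identical CIDRs
        # can exist in the SSN VPC and the notebook VPC.
        filters.append({'Name': 'vpc-id', 'Values': [vpc_id]})
    for subnet in ec2.subnets.filter(Filters=filters):
        return subnet.id
    return ''
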
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_jars_parser.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_jars_parser.py
index f011e6a..0626285 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_jars_parser.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_jars_parser.py
@@ -53,15 +53,22 @@
         endpoint = "https://s3.{}.amazonaws.com.cn".format(args.region)
     else:
         endpoint = "https://s3-{}.amazonaws.com".format(args.region)
+    os.system('touch /tmp/scala_version')
+    scala_ver = subprocess.check_output("spark-submit --version 2>&1 | awk '/Scala version / {gsub(/,/, \"\"); print $4}'",
+                                        shell=True).decode('UTF-8')
+    with open('/tmp/scala_version', 'w') as outfile:
+        outfile.write(scala_ver)
+    os.system('touch /tmp/r_version')
+    r_ver = subprocess.check_output("R --version | awk '/version / {print $3}'", shell=True).decode('UTF-8')
+    with open('/tmp/r_version', 'w') as outfile:
+        outfile.write(r_ver)
     os.system('touch /tmp/python_version')
-    python_ver = subprocess.check_output("python3.5 -V 2>/dev/null | awk '{print $2}'", shell=True)
-    if python_ver != '':
-        with open('/tmp/python_version', 'w') as outfile:
-            outfile.write(python_ver)
-    else:
-        python_ver = subprocess.check_output("python3.4 -V 2>/dev/null | awk '{print $2}'", shell=True)
-        with open('/tmp/python_version', 'w') as outfile:
-            outfile.write(python_ver)
+    for v in range(4, 7):
+        python_ver_checker = "python3.{} -V 2>/dev/null".format(v) + " | awk '{print $2}'"
+        python_ver = subprocess.check_output(python_ver_checker, shell=True).decode('UTF-8')
+        if python_ver != '':
+            with open('/tmp/python_version', 'w') as outfile:
+                outfile.write(python_ver)
     os.system('/bin/tar -zhcvf /tmp/jars.tar.gz '
               '--no-recursion '
               '--absolute-names '
@@ -126,4 +133,20 @@
               format(args.bucket,
                      args.user_name,
                      args.cluster_name,
+                     endpoint, args.region))
+    os.system('aws s3 cp /tmp/scala_version '
+              's3://{}/{}/{}/ '
+              '--endpoint-url {} '
+              '--region {} --sse AES256'.
+              format(args.bucket,
+                     args.user_name,
+                     args.cluster_name,
+                     endpoint, args.region))
+    os.system('aws s3 cp /tmp/r_version '
+              's3://{}/{}/{}/ '
+              '--endpoint-url {} '
+              '--region {} --sse AES256'.
+              format(args.bucket,
+                     args.user_name,
+                     args.cluster_name,
                      endpoint, args.region))
\ No newline at end of file
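
The parser now records Scala, R, and Python versions next to the jars so remote kernels can be matched to the cluster runtime, and the Python probe walks a range of minor versions instead of hard-coding 3.5 with a 3.4 fallback. A standalone sketch of the probe, assuming a POSIX shell and the interpreters on PATH:

import subprocess

python_ver = ''
for v in range(4, 7):
    # Probe python3.4 .. python3.6; a missing interpreter prints nothing.
    cmd = "python3.{} -V 2>/dev/null".format(v) + " | awk '{print $2}'"
    out = subprocess.check_output(cmd, shell=True).decode('UTF-8')
    if out != '':
        python_ver = out  # later iterations keep the highest version found

with open('/tmp/python_version', 'w') as outfile:
    outfile.write(python_ver)
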
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
index 8958fee..7dd94d9 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import argparse
 import sys
 import os
@@ -46,117 +47,111 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
-        os.environ['exploratory_name']
-    except:
-        os.environ['exploratory_name'] = ''
-    if os.path.exists('/response/.emr_creating_{}'.format(os.environ['exploratory_name'])):
-        time.sleep(30)
-    create_aws_config_files()
-    emr_conf = dict()
-    emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    emr_conf['project_name'] = os.environ['project_name']
-    emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    edge_status = get_instance_status(emr_conf['service_base_name'] + '-Tag', '{0}-{1}-{2}-edge'
-                                      .format(emr_conf['service_base_name'],
-                                              emr_conf['project_name'],
-                                              emr_conf['endpoint_name']))
-    if edge_status != 'running':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = get_instance_hostname(
-            emr_conf['service_base_name'] + '-Tag',
-            emr_conf['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable',
-                            os.environ['ssn_dlab_path'],
-                            os.environ['conf_os_user'], ssn_hostname)
-        append_result("Edge node is unavailable")
+        emr_conf = dict()
+        if 'exploratory_name' in os.environ:
+            emr_conf['exploratory_name'] = os.environ['exploratory_name']
+        else:
+            emr_conf['exploratory_name'] = ''
+        if os.path.exists('/response/.emr_creating_{}'.format(emr_conf['exploratory_name'])):
+            time.sleep(30)
+        dlab.actions_lib.create_aws_config_files()
+        emr_conf['service_base_name'] = os.environ['conf_service_base_name']
+        emr_conf['project_name'] = os.environ['project_name']
+        emr_conf['endpoint_name'] = os.environ['endpoint_name']
+        edge_status = dlab.meta_lib.get_instance_status(
+            '{}-tag'.format(emr_conf['service_base_name']),
+            '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                      emr_conf['endpoint_name']))
+        if edge_status != 'running':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            ssn_hostname = dlab.meta_lib.get_instance_hostname(
+                emr_conf['service_base_name'] + '-tag',
+                emr_conf['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable',
+                                         os.environ['ssn_dlab_path'],
+                                         os.environ['conf_os_user'], ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+        print('Generating infrastructure names and tags')
+        if 'computational_name' in os.environ:
+            emr_conf['computational_name'] = os.environ['computational_name']
+        else:
+            emr_conf['computational_name'] = ''
+        emr_conf['apps'] = 'Hadoop Hive Hue Spark'
+        emr_conf['tag_name'] = '{0}-tag'.format(emr_conf['service_base_name'])
+        emr_conf['key_name'] = os.environ['conf_key_name']
+        emr_conf['endpoint_tag'] = emr_conf['endpoint_name']
+        emr_conf['project_tag'] = emr_conf['project_name']
+        emr_conf['region'] = os.environ['aws_region']
+        emr_conf['release_label'] = os.environ['emr_version']
+        emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
+                                                                   emr_conf['project_name'], emr_conf['endpoint_name'])
+        emr_conf['edge_security_group_name'] = '{0}-sg'.format(emr_conf['edge_instance_name'])
+        emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
+        emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
+        emr_conf['instance_count'] = os.environ['emr_instance_count']
+        emr_conf['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
+        emr_conf['role_service_name'] = os.environ['emr_service_role']
+        emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
+        emr_conf['tags'] = 'Name={0}-{1}-{5}-des-{3},' \
+                           '{0}-tag={0}-{1}-{5}-des-{3},' \
+                           'Notebook={4},' \
+                           'State=not-configured,' \
+                           'ComputationalName={3},' \
+                           'Endpoint_tag={5}'\
+            .format(emr_conf['service_base_name'],
+                    emr_conf['project_name'],
+                    emr_conf['exploratory_name'],
+                    emr_conf['computational_name'],
+                    os.environ['notebook_instance_name'],
+                    emr_conf['endpoint_name'])
+        emr_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}-{4}'\
+            .format(emr_conf['service_base_name'],
+                    emr_conf['project_name'],
+                    emr_conf['endpoint_name'],
+                    emr_conf['computational_name'],
+                    args.uuid)
+        emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                               emr_conf['endpoint_name']).lower().replace('_', '-')
+        emr_conf['configurations'] = '[]'
+        if 'emr_configurations' in os.environ:
+            emr_conf['configurations'] = os.environ['emr_configurations']
+
+        tag = {"Key": "{}-tag".format(emr_conf['service_base_name']),
+               "Value": "{}-{}-{}-subnet".format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                 emr_conf['endpoint_name'])}
+        emr_conf['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        emr_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        emr_conf['all_ip_cidr'] = '0.0.0.0/0'
+        emr_conf['additional_emr_sg_name'] = '{}-{}-{}-de-se-additional-sg'\
+            .format(emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'])
+        emr_conf['vpc_id'] = os.environ['aws_vpc_id']
+        emr_conf['vpc2_id'] = os.environ['aws_notebook_vpc_id']
+        emr_conf['provision_instance_ip'] = None
+        try:
+            emr_conf['provision_instance_ip'] = dlab.meta_lib.get_instance_ip_address(
+                emr_conf['tag_name'], '{0}-{1}-endpoint'.format(emr_conf['service_base_name'],
+                                                                emr_conf['endpoint_name'])).get('Private') + "/32"
+        except:
+            emr_conf['provision_instance_ip'] = dlab.meta_lib.get_instance_ip_address(
+                emr_conf['tag_name'], '{0}-ssn'.format(emr_conf['service_base_name'])).get('Private') + "/32"
+        if os.environ['emr_slave_instance_spot'] == 'True':
+            ondemand_price = float(dlab.meta_lib.get_ec2_price(emr_conf['slave_instance_type'], emr_conf['region']))
+            emr_conf['slave_bid_price'] = (ondemand_price * int(os.environ['emr_slave_instance_spot_pct_price'])) / 100
+        else:
+            emr_conf['slave_bid_price'] = 0
+        if 'emr_timeout' in os.environ:
+            emr_conf['emr_timeout'] = os.environ['emr_timeout']
+        else:
+            emr_conf['emr_timeout'] = "1200"
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
         sys.exit(1)
-    print('Generating infrastructure names and tags')
-    try:
-        emr_conf['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        emr_conf['exploratory_name'] = ''
-    try:
-        emr_conf['computational_name'] = os.environ['computational_name']
-    except:
-        emr_conf['computational_name'] = ''
-    emr_conf['apps'] = 'Hadoop Hive Hue Spark'
 
-    emr_conf['tag_name'] = '{0}-Tag'.format(emr_conf['service_base_name'])
-    emr_conf['key_name'] = os.environ['conf_key_name']
-    emr_conf['endpoint_tag'] = os.environ['endpoint_name']
-    emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    emr_conf['project_tag'] = os.environ['project_name']
-    emr_conf['region'] = os.environ['aws_region']
-    emr_conf['release_label'] = os.environ['emr_version']
-    emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
-                                                               os.environ['project_name'], emr_conf['endpoint_tag'])
-    emr_conf['edge_security_group_name'] = '{0}-sg'.format(emr_conf['edge_instance_name'])
-    emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
-    emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
-    emr_conf['instance_count'] = os.environ['emr_instance_count']
-    emr_conf['notebook_ip'] = get_instance_ip_address(
-        emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
-    emr_conf['role_service_name'] = os.environ['emr_service_role']
-    emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
-    emr_conf['tags'] = 'Name={0}-{1}-des-{2}-{3},' \
-                       '{0}-Tag={0}-{1}-des-{2}-{3},' \
-                       'Notebook={4},' \
-                       'State=not-configured,' \
-                       'ComputationalName={3}' \
-        .format(emr_conf['service_base_name'],
-                os.environ['project_name'],
-                emr_conf['exploratory_name'],
-                emr_conf['computational_name'],
-                os.environ['notebook_instance_name'])
-    emr_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}-{4}'\
-        .format(emr_conf['service_base_name'],
-                os.environ['project_name'],
-                emr_conf['exploratory_name'],
-                emr_conf['computational_name'],
-                args.uuid)
-    emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
-                                                           emr_conf['endpoint_name'])).lower().replace('_', '-')
-    emr_conf['configurations'] = '[]'
-    if 'emr_configurations' in os.environ:
-        emr_conf['configurations'] = os.environ['emr_configurations']
-
-    tag = {"Key": "{}-Tag".format(emr_conf['service_base_name']),
-           "Value": "{}-{}-subnet".format(emr_conf['service_base_name'],
-                                          os.environ['project_name'])}
-    emr_conf['subnet_cidr'] = get_subnet_by_tag(tag)
-    emr_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    emr_conf['all_ip_cidr'] = '0.0.0.0/0'
-    emr_conf['additional_emr_sg_name'] = '{}-{}-de-se-additional-sg'\
-        .format(emr_conf['service_base_name'], os.environ['project_name'])
-    emr_conf['vpc_id'] = os.environ['aws_vpc_id']
-    emr_conf['vpc2_id'] = os.environ['aws_notebook_vpc_id']
-    emr_conf['provision_instance_ip'] = None
-    try:
-        emr_conf['provision_instance_ip'] = get_instance_ip_address(
-            emr_conf['tag_name'], '{0}-{1}-endpoint'.format(emr_conf['service_base_name'],
-                                                            os.environ['endpoint_name'])).get('Private') + "/32"
-    except:
-        emr_conf['provision_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'], '{0}-ssn'.format(
-            emr_conf['service_base_name'])).get('Private') + "/32"
-    if os.environ['emr_slave_instance_spot'] == 'True':
-        ondemand_price = float(get_ec2_price(emr_conf['slave_instance_type'], emr_conf['region']))
-        emr_conf['slave_bid_price'] = (ondemand_price * int(os.environ['emr_slave_instance_spot_pct_price'])) / 100
-    else:
-        emr_conf['slave_bid_price'] = 0
-
-    try:
-        emr_conf['emr_timeout'] = os.environ['emr_timeout']
-    except:
-        emr_conf['emr_timeout'] = "1200"
-
-    print("Will create exploratory environment with edge node "
-          "as access point as following: {}".
-          format(json.dumps(emr_conf,
-                            sort_keys=True,
-                            indent=4,
-                            separators=(',', ': '))))
+    print("Will create exploratory environment with edge node as access point as following: {}".format(
+        json.dumps(emr_conf, sort_keys=True, indent=4, separators=(',', ': '))))
     logging.info(json.dumps(emr_conf))
 
     with open('/root/result.json', 'w') as f:
@@ -164,11 +159,11 @@
         json.dump(data, f)
 
     try:
-        emr_waiter(emr_conf['tag_name'], os.environ['notebook_instance_name'])
-        local('touch /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
+        dlab.meta_lib.emr_waiter(emr_conf['tag_name'], os.environ['notebook_instance_name'])
+        local('touch /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
     except Exception as err:
         traceback.print_exc()
-        append_result("EMR waiter fail.", str(err))
+        dlab.fab.append_result("EMR waiter fail.", str(err))
         sys.exit(1)
 
     with open('/root/result.json', 'w') as f:
@@ -178,8 +173,8 @@
     logging.info('[CREATING ADDITIONAL SECURITY GROUPS FOR EMR]')
     print("[CREATING ADDITIONAL SECURITY GROUPS FOR EMR]")
     try:
-        edge_group_id = check_security_group(emr_conf['edge_security_group_name'])
-        cluster_sg_ingress = format_sg([
+        edge_group_id = dlab.meta_lib.check_security_group(emr_conf['edge_security_group_name'])
+        cluster_sg_ingress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": emr_conf['subnet_cidr']}],
@@ -199,7 +194,7 @@
                 "PrefixListIds": []
             }
         ])
-        cluster_sg_egress = format_sg([
+        cluster_sg_egress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": emr_conf['subnet_cidr']}],
@@ -243,18 +238,18 @@
                    emr_conf['cluster_name'], True)
         try:
             if 'conf_additional_tags' in os.environ:
-                os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                    emr_conf['project_tag'], emr_conf['endpoint_tag'])
+                os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                    emr_conf['project_tag'], emr_conf['endpoint_tag'], os.environ['conf_additional_tags'])
             else:
-                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(emr_conf['project_tag'], emr_conf['endpoint_tag'])
+                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(emr_conf['project_tag'],
+                                                                                               emr_conf['endpoint_tag'])
             print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create sg.", str(err))
+        dlab.fab.append_result("Failed to create sg.", str(err))
         sys.exit(1)
 
     local("echo Waiting for changes to propagate; sleep 10")
@@ -302,7 +297,7 @@
                     emr_conf['region'],
                     emr_conf['tags'],
                     os.environ['conf_key_dir'],
-                    os.environ['project_name'],
+                    emr_conf['project_name'],
                     os.environ['emr_slave_instance_spot'],
                     str(emr_conf['slave_bid_price']),
                     emr_conf['service_base_name'],
@@ -313,14 +308,12 @@
         except:
             traceback.print_exc()
             raise Exception
-
         cluster_name = emr_conf['cluster_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], emr_conf['key_name'])
-        local('rm /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
+        local('rm /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create EMR Cluster.", str(err))
-        local('rm /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
-        emr_id = get_emr_id_by_name(emr_conf['cluster_name'])
-        terminate_emr(emr_id)
+        dlab.fab.append_result("Failed to create EMR Cluster.", str(err))
+        local('rm /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
+        emr_id = dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])
+        dlab.actions_lib.terminate_emr(emr_id)
         sys.exit(1)
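
One computation worth spelling out from the block above: the spot bid is a configured percentage of the current on-demand price. A small sketch, with get_ec2_price standing in for the dlab.meta_lib helper so the function is self-contained:

def slave_bid_price(instance_type, region, spot_enabled, pct, get_ec2_price):
    # e.g. on-demand $0.20/h with pct=70 yields a $0.14 bid.
    if spot_enabled == 'True':
        ondemand = float(get_ec2_price(instance_type, region))
        return ondemand * int(pct) / 100
    return 0
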
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
index 05f2e1f..e9551e3 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
@@ -21,18 +21,22 @@
 #
 # ******************************************************************************
 
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import boto3
+import logging
 import argparse
 import sys
 import os
+import traceback
+import json
 
 
 def terminate_emr_cluster(emr_name, bucket_name, tag_name, nb_tag_value, ssh_user, key_path):
     print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
     try:
-        clusters_list = get_emr_list(emr_name, 'Value')
+        clusters_list = dlab.meta_lib.get_emr_list(emr_name, 'Value')
         if clusters_list:
             for cluster_id in clusters_list:
                 computational_name = ''
@@ -44,13 +48,13 @@
                 for tag in cluster.get('Tags'):
                     if tag.get('Key') == 'ComputationalName':
                         computational_name = tag.get('Value')
-                s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+                dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
                 print("The bucket {} has been cleaned successfully".format(bucket_name))
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
                 print("Removing EMR kernels from notebook")
-                remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path,
-                               emr_version, computational_name)
+                dlab.actions_lib.remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path,
+                                                emr_version, computational_name)
         else:
             print("There are no EMR clusters to terminate.")
     except:
@@ -66,19 +70,18 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     emr_conf = dict()
-    emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    emr_conf['service_base_name'] = os.environ['conf_service_base_name']
     emr_conf['emr_name'] = os.environ['emr_cluster_name']
     emr_conf['notebook_name'] = os.environ['notebook_instance_name']
     emr_conf['project_name'] = os.environ['project_name']
     emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
-                                                           emr_conf['endpoint_name'])).lower().replace('_', '-')
+    emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                           emr_conf['endpoint_name']).lower().replace('_', '-')
     emr_conf['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
-    emr_conf['tag_name'] = emr_conf['service_base_name'] + '-Tag'
+    emr_conf['tag_name'] = emr_conf['service_base_name'] + '-tag'
 
     try:
         logging.info('[TERMINATE EMR CLUSTER]')
@@ -88,7 +91,7 @@
                                   emr_conf['notebook_name'], os.environ['conf_os_user'], emr_conf['key_path'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate EMR cluster.", str(err))
+            dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -101,6 +104,6 @@
                    "Action": "Terminate EMR cluster"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
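
terminate_emr_cluster walks every matching cluster, cleans its S3 prefix, terminates it, then strips its kernels from the notebook; the computational name comes from the cluster's tags. A hedged boto3 sketch of that tag scan (cluster_id as returned by dlab.meta_lib.get_emr_list):

import boto3

def get_computational_name(cluster_id):
    # Read the ComputationalName tag off the EMR cluster, as the loop
    # above does before calling remove_kernels.
    emr = boto3.client('emr')
    cluster = emr.describe_cluster(ClusterId=cluster_id).get('Cluster')
    for tag in cluster.get('Tags', []):
        if tag.get('Key') == 'ComputationalName':
            return tag.get('Value')
    return ''
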
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
index 20f58af..e0a4f0c 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -37,13 +38,13 @@
 
 def configure_slave(slave_number, data_engine):
     slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
-    slave_hostname = get_instance_private_ip_address(data_engine['tag_name'], slave_name)
+    slave_hostname = dlab.meta_lib.get_instance_private_ip_address(data_engine['tag_name'], slave_name)
     try:
         logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
         print('[CREATING DLAB SSH USER ON SLAVE NODE]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
-            (slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
-             data_engine['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            master_node_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
+            data_engine['initial_user'], data_engine['dlab_ssh_user'], data_engine['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -51,12 +52,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to create ssh user on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -70,12 +67,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to clean slave instance.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean slave instance.", str(err))
         sys.exit(1)
 
     try:
@@ -91,12 +84,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to configure proxy on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -111,18 +100,15 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to install prerequisites on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave + 1))
         print('[CONFIGURE SLAVE NODE {}]'.format(slave + 1))
-        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
+        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
+                 "--scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
             format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
                    os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
@@ -133,12 +119,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to configure slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure slave node.", str(err))
         sys.exit(1)
 
     try:
@@ -153,15 +135,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed install users key on slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed install users key on slave node.", str(err))
         sys.exit(1)
 
 
+def clear_resources():
+    dlab.actions_lib.remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
+    for i in range(data_engine['instance_count'] - 1):
+        slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+        dlab.actions_lib.remove_ec2(data_engine['tag_name'], slave_name)
+
+
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                                os.environ['request_id'])
@@ -173,81 +158,76 @@
     try:
         print('Generating infrastructure names and tags')
         data_engine = dict()
-        try:
+        if 'exploratory_name' in os.environ:
             data_engine['exploratory_name'] = os.environ['exploratory_name']
-        except:
+        else:
             data_engine['exploratory_name'] = ''
-        try:
+        if 'computational_name' in os.environ:
             data_engine['computational_name'] = os.environ['computational_name']
-        except:
+        else:
             data_engine['computational_name'] = ''
-        data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+        data_engine['service_base_name'] = os.environ['conf_service_base_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['region'] = os.environ['aws_region']
         data_engine['network_type'] = os.environ['conf_network_type']
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
         data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
         data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
         data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
-        data_engine['dataengine_master_security_group_name'] = data_engine['service_base_name'] + '-' + \
-                                                               os.environ['project_name'] + '-dataengine-master-sg'
-        data_engine['dataengine_slave_security_group_name'] = data_engine['service_base_name'] + '-' + \
-                                                              os.environ['project_name'] + '-dataengine-slave-sg'
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+        data_engine['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
         tag = {"Key": data_engine['tag_name'],
-               "Value": "{}-{}-subnet".format(data_engine['service_base_name'], os.environ['project_name'])}
-        data_engine['subnet_cidr'] = get_subnet_by_tag(tag)
-        data_engine['notebook_dataengine_role_profile_name'] = data_engine['service_base_name']. \
-                                                                   lower().replace('-', '_') + "-" + \
-                                                               os.environ['project_name'] + '-nb-de-Profile'
+               "Value": "{}-{}-{}-subnet".format(data_engine['service_base_name'], data_engine['project_name'],
+                                                 data_engine['endpoint_name'])}
+        data_engine['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
-        master_node_hostname = get_instance_hostname(data_engine['tag_name'], data_engine['master_node_name'])
+        master_node_hostname = dlab.meta_lib.get_instance_hostname(data_engine['tag_name'],
+                                                                   data_engine['master_node_name'])
         data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
-        data_engine['user_keyname'] = os.environ['project_name']
+        data_engine['user_keyname'] = data_engine['project_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        data_engine['project_name'] = os.environ['project_name']
-        data_engine['endpoint_name'] = os.environ['endpoint_name']
         edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
                                                        data_engine['project_name'], data_engine['endpoint_name'])
-        edge_instance_hostname = get_instance_hostname(data_engine['tag_name'], edge_instance_name)
-        edge_instance_private_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Private')
-        if data_engine['network_type'] == 'private':
-            edge_instance_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Private')
-        else:
-            edge_instance_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Public')
-
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(data_engine['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        data_engine['edge_instance_hostname'] = edge_instance_hostname
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            data_engine['initial_user'] = 'ubuntu'
+            data_engine['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            data_engine['initial_user'] = 'ec2-user'
+            data_engine['sudo_group'] = 'wheel'
     except Exception as err:
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
         data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(int(os.environ['dataengine_instance_count']) - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER ON MASTER NODE]')
         print('[CREATING DLAB SSH USER ON MASTER NODE]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
-             data_engine['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            master_node_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
+            data_engine['initial_user'], data_engine['dlab_ssh_user'], data_engine['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -255,12 +236,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to create ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on master.", str(err))
         sys.exit(1)
 
     try:
@@ -274,12 +251,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to clean master instance.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -295,12 +268,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to configure proxy on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on master.", str(err))
         sys.exit(1)
 
     try:
@@ -315,12 +284,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to install prerequisites on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
         sys.exit(1)
 
     try:
@@ -335,18 +300,15 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed install users key on master node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed install users key on master node.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE MASTER NODE]')
         print('[CONFIGURE MASTER NODE]')
-        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
+        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
+                 "--scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
             format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
                    os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
@@ -357,12 +319,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure master node", str(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
+        dlab.fab.append_result("Failed to configure master node", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -377,17 +335,15 @@
             if job.exitcode != 0:
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
+        dlab.fab.append_result("Failed to configure slave nodes.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
-        notebook_instance_ip = get_instance_private_ip_address('Name', os.environ['notebook_instance_name'])
+        notebook_instance_ip = dlab.meta_lib.get_instance_private_ip_address('Name',
+                                                                             os.environ['notebook_instance_name'])
         additional_info = {
             "computational_name": data_engine['computational_name'],
             "master_node_hostname": master_node_hostname,
@@ -412,21 +368,20 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
+        dlab.fab.append_result("Failed to configure reverse proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        ip_address = get_instance_ip_address(data_engine['tag_name'],
-                                             data_engine['master_node_name']).get('Private')
+        ip_address = dlab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
+                                                           data_engine['master_node_name']).get('Private')
         spark_master_url = "http://" + ip_address + ":8080"
-        spark_master_acces_url = "http://" + edge_instance_ip + "/{}/".format(data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
+        spark_master_access_url = "https://{}/{}_{}/".format(data_engine['edge_instance_hostname'],
+                                                             data_engine['exploratory_name'],
+                                                             data_engine['computational_name'])
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
         print("Service base name: {}".format(data_engine['service_base_name']))
@@ -437,17 +392,19 @@
         print("Instance count: {}".format(str(data_engine['instance_count'])))
         with open("/root/result.json", 'w') as result:
             res = {"hostname": data_engine['cluster_name'],
-                   "instance_id": get_instance_by_name(data_engine['tag_name'], data_engine['master_node_name']),
+                   "instance_id": dlab.meta_lib.get_instance_by_name(data_engine['tag_name'],
+                                                                     data_engine['master_node_name']),
                    "key_name": data_engine['key_name'],
                    "Action": "Create new Data Engine",
                    "computational_url": [
                        {"description": "Apache Spark Master",
-                        "url": spark_master_acces_url},
+                        "url": spark_master_access_url},
                        #{"description": "Apache Spark Master (via tunnel)",
                         #"url": spark_master_url}
                    ]}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
index 0cefaeb..ad19f7a 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -43,84 +44,85 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
         data_engine = dict()
-        data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        edge_status = get_instance_status(data_engine['service_base_name'] + '-Tag',
-                                          '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
-                                                                    os.environ['project_name'],
-                                                                    os.environ['endpoint_name']))
+        data_engine['service_base_name'] = os.environ['conf_service_base_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        edge_status = dlab.meta_lib.get_instance_status(
+            data_engine['service_base_name'] + '-tag', '{0}-{1}-{2}-edge'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name']))
         if edge_status != 'running':
             logging.info('ERROR: Edge node is unavailable! Aborting...')
             print('ERROR: Edge node is unavailable! Aborting...')
-            ssn_hostname = get_instance_hostname(data_engine['service_base_name'] + '-Tag',
-                                                 data_engine['service_base_name'] + '-ssn')
-            put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                                ssn_hostname)
-            append_result("Edge node is unavailable")
+            ssn_hostname = dlab.meta_lib.get_instance_hostname(data_engine['service_base_name'] + '-tag',
+                                                               data_engine['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
             sys.exit(1)
         print('Generating infrastructure names and tags')
-
-        try:
+        if 'exploratory_name' in os.environ:
             data_engine['exploratory_name'] = os.environ['exploratory_name']
-        except:
+        else:
             data_engine['exploratory_name'] = ''
-        try:
+        if 'computational_name' in os.environ:
             data_engine['computational_name'] = os.environ['computational_name']
-        except:
+        else:
             data_engine['computational_name'] = ''
-
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['region'] = os.environ['aws_region']
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
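+        # Cluster name now also embeds the endpoint:
+        # <service_base_name>-<project>-<endpoint>-de-<computational_name>,
+        # so clusters of one project created via different endpoints cannot collide.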
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
         data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
         data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
         data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
-        data_engine['dataengine_master_security_group_name'] = '{}-{}-dataengine-master-sg' \
-            .format(data_engine['service_base_name'], os.environ['project_name'])
-        data_engine['dataengine_slave_security_group_name'] = '{}-{}-dataengine-slave-sg' \
-            .format(data_engine['service_base_name'], os.environ['project_name'])
-        data_engine['tag_name'] = '{}-Tag'.format(data_engine['service_base_name'])
+        data_engine['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['tag_name'] = '{}-tag'.format(data_engine['service_base_name'])
         tag = {"Key": data_engine['tag_name'],
-               "Value": "{}-{}-subnet".format(data_engine['service_base_name'], os.environ['project_name'])}
-        data_engine['subnet_cidr'] = get_subnet_by_tag(tag)
-        data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-nb-de-Profile' \
-            .format(data_engine['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
+               "Value": "{}-{}-{}-subnet".format(data_engine['service_base_name'], data_engine['project_name'],
+                                                 data_engine['endpoint_name'])}
+        data_engine['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
         data_engine['cluster_nodes_tag'] = {"Key": "dataengine_notebook_name",
                                             "Value": os.environ['notebook_instance_name']}
         data_engine['cluster_nodes_resource_tag'] = {"Key": os.environ['conf_tag_resource_id'],
-                                                     "Value": data_engine['service_base_name'] + ':' +
-                                                              data_engine['cluster_name']}
+                                                     "Value": "{}:{}".format(data_engine['service_base_name'],
+                                                                             data_engine['cluster_name'])}
         data_engine['cluster_nodes_billing_tag'] = {"Key": os.environ['conf_billing_tag_key'],
-                                                     "Value": os.environ['conf_billing_tag_value']}
+                                                    "Value": os.environ['conf_billing_tag_value']}
         data_engine['primary_disk_size'] = '30'
         data_engine['instance_class'] = 'dataengine'
 
         if os.environ['conf_shared_image_enabled'] == 'false':
-            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
+            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'],
+                os.environ['application'])
         else:
             data_engine['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['application'])
+                                                                                     data_engine['endpoint_name'],
+                                                                                     os.environ['application'])
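+        # If a custom notebook_image_name is supplied, expand it to the fully qualified
+        # name <service_base_name>-<project>-<endpoint>-<application>-<image_name>;
+        # otherwise fall back to the expected (auto-built) notebook image.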
         data_engine['notebook_image_name'] = (
-            lambda x: '{0}-{1}-{2}-{3}'.format(data_engine['service_base_name'],
-                                               os.environ['project_name'],
-                                               os.environ['application'],
-                                               os.environ['notebook_image_name'].lower().replace('_', '-')) if (
+            lambda x: '{0}-{1}-{4}-{2}-{3}'.format(data_engine['service_base_name'],
+                                                   data_engine['project_name'],
+                                                   os.environ['application'],
+                                                   os.environ['notebook_image_name'],
+                                                   data_engine['endpoint_name']) if (
                     x != 'None' and x != '')
             else data_engine['expected_image_name'])(str(os.environ.get('notebook_image_name')))
         print('Searching pre-configured images')
-        data_engine['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
-        image_id = get_ami_id_by_name(data_engine['notebook_image_name'], 'available')
+        data_engine['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+            os.environ['conf_os_family'])])
+        image_id = dlab.meta_lib.get_ami_id_by_name(data_engine['notebook_image_name'], 'available')
         if image_id != '' and os.environ['application'] in os.environ['dataengine_image_notebooks'].split(','):
             data_engine['ami_id'] = image_id
             print('Pre-configured image found. Using: {}'.format(data_engine['ami_id']))
@@ -129,8 +131,7 @@
             print('No pre-configured image found. Using default one: {}'.format(data_engine['ami_id']))
 
     except Exception as err:
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary. Exception:" + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     with open('/root/result.json', 'w') as f:
@@ -138,35 +139,39 @@
         json.dump(data, f)
 
     try:
-        os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
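+        # conf_additional_tags is optional: reading it raises KeyError when no extra
+        # tags were passed, in which case only the project/endpoint tags are written.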
+        os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+            data_engine['project_name'], data_engine['endpoint_name'], os.environ['conf_additional_tags'])
     except KeyError:
-        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
+        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(data_engine['project_name'],
+                                                                                       data_engine['endpoint_name'])
     print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
 
     try:
         logging.info('[CREATE MASTER NODE]')
         print('[CREATE MASTER NODE]')
         data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "master"}
-        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
+        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} " \
+                 "--subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} " \
+                 "--instance_class {}" \
             .format(data_engine['master_node_name'], data_engine['ami_id'], data_engine['master_size'],
                     data_engine['key_name'],
-                    get_security_group_by_name(data_engine['dataengine_master_security_group_name']),
-                    get_subnet_by_cidr(data_engine['subnet_cidr']),
+                    dlab.meta_lib.get_security_group_by_name(data_engine['dataengine_master_security_group_name']),
+                    dlab.meta_lib.get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
                     data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
                     data_engine['master_node_name'], data_engine['primary_disk_size'], data_engine['instance_class'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
-            data_engine['master_id'] = get_instance_by_name(data_engine['tag_name'], data_engine['master_node_name'])
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag'], False)
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_resource_tag'], False)
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_billing_tag'], False)
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag_type'], False)
+            data_engine['master_id'] = dlab.meta_lib.get_instance_by_name(data_engine['tag_name'],
+                                                                          data_engine['master_node_name'])
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag'], False)
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_resource_tag'], False)
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_billing_tag'], False)
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag_type'], False)
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create master instance.", str(err))
+        dlab.fab.append_result("Failed to create master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -175,31 +180,32 @@
             print('[CREATE SLAVE NODE {}]'.format(i + 1))
             slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
             data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "slave"}
-            params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
+            params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} " \
+                     "--subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} " \
+                     "--primary_disk_size {} --instance_class {}" \
                 .format(slave_name, data_engine['ami_id'], data_engine['slave_size'],
                         data_engine['key_name'],
-                        get_security_group_by_name(data_engine['dataengine_slave_security_group_name']),
-                        get_subnet_by_cidr(data_engine['subnet_cidr']),
+                        dlab.meta_lib.get_security_group_by_name(data_engine['dataengine_slave_security_group_name']),
+                        dlab.meta_lib.get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
                         data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
                         slave_name, data_engine['primary_disk_size'], data_engine['instance_class'])
             try:
                 local("~/scripts/{}.py {}".format('common_create_instance', params))
-                data_engine['slave_id'] = get_instance_by_name(data_engine['tag_name'], slave_name)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag'], False)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_resource_tag'], False)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_billing_tag'], False)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag_type'], False)
+                data_engine['slave_id'] = dlab.meta_lib.get_instance_by_name(data_engine['tag_name'], slave_name)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag'], False)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_resource_tag'], False)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_billing_tag'], False)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag_type'], False)
             except:
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
+        dlab.actions_lib.remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
         for i in range(data_engine['instance_count'] - 1):
             slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
             try:
-                remove_ec2(data_engine['tag_name'], slave_name)
+                dlab.actions_lib.remove_ec2(data_engine['tag_name'], slave_name)
             except:
                 print("The slave instance {} hasn't been created.".format(slave_name))
-        append_result("Failed to create slave instances.", str(err))
+        dlab.fab.append_result("Failed to create slave instances.", str(err))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
index 2e299d4..0450ff7 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
@@ -24,14 +24,18 @@
 import logging
 import json
 import os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
+import traceback
+from fabric.api import *
 
 
 def start_data_engine(cluster_name):
     print("Start Data Engine")
     try:
-        start_ec2(os.environ['conf_tag_resource_id'], cluster_name)
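+        # start_ec2() is assumed to filter instances by the conf_tag_resource_id tag,
+        # whose value identifies the cluster, and to start every matching node.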
+        dlab.actions_lib.start_ec2(os.environ['conf_tag_resource_id'], cluster_name)
     except:
         sys.exit(1)
 
@@ -47,7 +51,7 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     data_engine = dict()
     
@@ -59,15 +63,14 @@
         data_engine['computational_name'] = os.environ['computational_name']
     except:
         data_engine['computational_name'] = ''
-    data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['project_name'] = os.environ['project_name']
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + \
-        data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + \
-        data_engine['computational_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
 
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
 
     logging.info('[START DATA ENGINE CLUSTER]')
     print('[START DATA ENGINE CLUSTER]')
@@ -76,19 +79,18 @@
                                          data_engine['cluster_name']))
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to start Data Engine.", str(err))
+        dlab.fab.append_result("Failed to start Data Engine.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[UPDATE LAST ACTIVITY TIME]')
         print('[UPDATE LAST ACTIVITY TIME]')
         data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
-        data_engine['notebook_ip'] = get_instance_ip_address(data_engine['tag_name'],
-                                                                    os.environ['notebook_instance_name']).get('Private')
-        data_engine['computational_ip'] = get_instance_ip_address(data_engine['tag_name'],
-                                                                         data_engine['computational_id']).get(
-            'Private')
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+        data_engine['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            data_engine['tag_name'], os.environ['notebook_instance_name']).get('Private')
+        data_engine['computational_ip'] = dlab.meta_lib.get_instance_ip_address(
+            data_engine['tag_name'], data_engine['computational_id']).get('Private')
         data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
             .format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -97,7 +99,7 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -108,6 +110,6 @@
                    "Action": "Start Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
index 3cb0d3b..d31d395 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
@@ -24,14 +24,15 @@
 import logging
 import json
 import os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
 import sys
 
 
 def stop_data_engine(cluster_name):
     print("Stop Data Engine")
     try:
-        stop_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+        dlab.actions_lib.stop_ec2(os.environ['conf_tag_resource_id'], cluster_name)
     except:
         sys.exit(1)
 
@@ -47,7 +48,7 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     data_engine_config = dict()
     try:
@@ -58,14 +59,13 @@
         data_engine_config['computational_name'] = os.environ['computational_name']
     except:
         data_engine_config['computational_name'] = ''
-    data_engine_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    data_engine_config['service_base_name'] = os.environ['conf_service_base_name']
     data_engine_config['project_name'] = os.environ['project_name']
-    data_engine_config['cluster_name'] = \
-        data_engine_config['service_base_name'] + '-' \
-        + data_engine_config['project_name'] + '-de-' + \
-        data_engine_config['exploratory_name'] + '-' \
-        + data_engine_config['computational_name']
+    data_engine_config['endpoint_name'] = os.environ['endpoint_name']
+    data_engine_config['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine_config['service_base_name'],
+                                                                 data_engine_config['project_name'],
+                                                                 data_engine_config['endpoint_name'],
+                                                                 data_engine_config['computational_name'])
 
     logging.info('[STOP DATA ENGINE CLUSTER]')
     print('[STOP DATA ENGINE CLUSTER]')
@@ -74,7 +74,7 @@
                                         data_engine_config['cluster_name']))
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to stop Data Engine.", str(err))
+        dlab.fab.append_result("Failed to stop Data Engine.", str(err))
         sys.exit(1)
 
     try:
@@ -83,6 +83,6 @@
                    "Action": "Stop Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
index a036f74..7d8c10d 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
@@ -24,8 +24,9 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import traceback
 import os
 
 
@@ -34,14 +35,14 @@
                           cluster_name, remote_kernel_name):
     print("Terminating data engine cluster")
     try:
-        remove_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+        dlab.actions_lib.remove_ec2(os.environ['conf_tag_resource_id'], cluster_name)
     except:
         sys.exit(1)
 
     print("Removing Data Engine kernels from notebook")
     try:
-        remove_dataengine_kernels(tag_name, notebook_name,
-                                  os_user, key_path, remote_kernel_name)
+        dlab.actions_lib.remove_dataengine_kernels(tag_name, notebook_name,
+                                                   os_user, key_path, remote_kernel_name)
     except:
         sys.exit(1)
 
@@ -57,7 +58,7 @@
                         filename=local_log_filepath)
     # generating variables dictionary
     print('Generating infrastructure names and tags')
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     data_engine = dict()
     
     try:
@@ -68,18 +69,16 @@
         data_engine['computational_name'] = os.environ['computational_name']
     except:
         data_engine['computational_name'] = ''
-    data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+    data_engine['service_base_name'] = os.environ['conf_service_base_name']
+    data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
     data_engine['project_name'] = os.environ['project_name']
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + \
-        data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' +\
-        data_engine['computational_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     data_engine['notebook_name'] = os.environ['notebook_instance_name']
-    data_engine['key_path'] = os.environ['conf_key_dir'] + '/' + \
-                              os.environ['conf_key_name'] + '.pem'
+    data_engine['key_path'] = "{}/{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
 
     try:
         logging.info('[TERMINATE DATA ENGINE]')
@@ -93,7 +92,7 @@
                     data_engine['cluster_name']), data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Data Engine.", str(err))
+            dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -104,6 +103,6 @@
                    "Action": "Terminate Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
index e257843..a2ca856 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -42,69 +44,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
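+        # conf_key_dir is assumed to end with a path separator already, hence the
+        # plain "{}{}.pem" concatenation.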
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -123,16 +132,16 @@
         print('[CONFIGURE PROXY ON DEEP LEARNING INSTANCE]')
         additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
         params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -145,12 +154,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -166,9 +174,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -178,7 +185,7 @@
                  "--os_user {2} --jupyter_version {3} " \
                  "--scala_version {4} --spark_version {5} " \
                  "--hadoop_version {6} --region {7} " \
-                 "--r_mirror {8} --ip_adress {9} --exploratory_name {10} --edge_ip {11}" \
+                 "--r_mirror {8} --ip_address {9} --exploratory_name {10} --edge_ip {11}" \
                  .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
                          os.environ['notebook_jupyter_version'], os.environ['notebook_scala_version'],
                          os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
@@ -190,9 +197,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Deep Learning node.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -203,12 +209,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -224,9 +229,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -251,99 +255,107 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
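+            # Choose a project-scoped image or a shared image based on existing AMIs and conf_shared_image_enabled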
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
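+                    # Append project and endpoint tags to any user-supplied additional tags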
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        notebook_config['project_name'], notebook_config['endpoint_name'],
+                        os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                        notebook_config['project_name'], notebook_config['endpoint_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], notebook_config['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
-                        os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                        notebook_config['endpoint_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        tensor_board_url = 'http://' + ip_address + ':6006'
+        jupyter_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
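+        # URLs below go through the Edge reverse proxy over HTTPS; the http:// ones above are direct in-VPC addresses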
+        jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    tensor_board_url = 'http://' + ip_address + ':6006'
-    jupyter_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    tensorboard_acces_url = "http://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'],notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensor_board_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url},
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensor_board_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url},
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py b/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
index 87d26c6..d96ef49 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
@@ -22,10 +22,17 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
+import uuid
+from fabric.api import *
+
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,73 +42,103 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    print('Generating infrastructure names and tags')
-    edge_conf = dict()
-    edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    edge_conf['key_name'] = os.environ['conf_key_name']
-    edge_conf['user_key'] = os.environ['key']
-    edge_conf['project_name'] = os.environ['project_name']
-    edge_conf['endpoint_name'] = os.environ['endpoint_name']
-    edge_conf['instance_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'], edge_conf['project_name'],
-                                                        edge_conf['endpoint_name'])
-    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
-    edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
-                                                           edge_conf['project_name'],
-                                                           edge_conf['endpoint_name']).lower().replace('_', '-')
-    edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
-                                                                     edge_conf['endpoint_name']).lower().replace('_', '-')
-    edge_conf['edge_security_group_name'] = '{}-sg'.format(edge_conf['instance_name'])
-    edge_conf['notebook_instance_name'] = '{}-{}-nb'.format(edge_conf['service_base_name'],
-                                                            os.environ['project_name'])
-    edge_conf['notebook_role_profile_name'] = '{}-{}-nb-Profile' \
-        .format(edge_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    edge_conf['notebook_security_group_name'] = '{}-{}-nb-sg'.format(edge_conf['service_base_name'],
-                                                                     os.environ['project_name'])
-    edge_conf['dataengine_instances_name'] = '{}-{}-dataengine' \
-        .format(edge_conf['service_base_name'], os.environ['project_name'])
-    tag = {"Key": edge_conf['tag_name'],
-           "Value": "{}-{}-subnet".format(edge_conf['service_base_name'], os.environ['project_name'])}
-    edge_conf['private_subnet_cidr'] = get_subnet_by_tag(tag)
-    edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-    edge_conf['network_type'] = os.environ['conf_network_type']
-    if edge_conf['network_type'] == 'public':
-        edge_conf['edge_public_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
-            'Public')
-        edge_conf['edge_private_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
-            'Private')
-    elif edge_conf['network_type'] == 'private':
-        edge_conf['edge_private_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
-            'Private')
-        edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
-    edge_conf['vpc1_cidrs'] = get_vpc_cidr_by_id(os.environ['aws_vpc_id'])
-    try:
-        edge_conf['vpc2_cidrs'] = get_vpc_cidr_by_id(os.environ['aws_notebook_vpc_id'])
-        edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs'] + edge_conf['vpc2_cidrs']))
-    except KeyError:
-        edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs']))
-
-    edge_conf['allowed_ip_cidr'] = list()
-    for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-        edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
-
-
-    instance_hostname = get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
+    def clear_resources():
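+        # Best-effort rollback of all Edge-related resources: IAM, the instance itself, security groups and the bucket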
+        dlab.actions_lib.remove_all_iam_resources('notebook', edge_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', edge_conf['project_name'])
+        dlab.actions_lib.remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+        dlab.actions_lib.remove_sgroups(edge_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(edge_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(edge_conf['instance_name'])
+        dlab.actions_lib.remove_s3('edge', edge_conf['project_name'])
 
     try:
+        print('Generating infrastructure names and tags')
+        edge_conf = dict()
+        edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        edge_conf['key_name'] = os.environ['conf_key_name']
+        edge_conf['user_key'] = os.environ['key']
+        edge_conf['project_name'] = os.environ['project_name']
+        edge_conf['endpoint_name'] = os.environ['endpoint_name']
+        edge_conf['instance_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                                            edge_conf['endpoint_name'])
+        edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
+        edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'],
+                                                               edge_conf['endpoint_name']).replace('_', '-').lower()
+        edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                         edge_conf['endpoint_name']
+                                                                         ).replace('_', '-').lower()
+        edge_conf['edge_security_group_name'] = '{}-{}-{}-edge-sg'.format(edge_conf['service_base_name'],
+                                                                          edge_conf['project_name'],
+                                                                          edge_conf['endpoint_name'])
+        edge_conf['notebook_instance_name'] = '{}-{}-{}-nb'.format(edge_conf['service_base_name'],
+                                                                   edge_conf['project_name'],
+                                                                   edge_conf['endpoint_name'])
+        edge_conf['notebook_role_profile_name'] = '{}-{}-{}-nb-profile'.format(edge_conf['service_base_name'],
+                                                                               edge_conf['project_name'],
+                                                                               edge_conf['endpoint_name'])
+        edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
+                                                                            edge_conf['project_name'],
+                                                                            edge_conf['endpoint_name'])
+        edge_conf['dataengine_instances_name'] = '{}-{}-{}-de'.format(edge_conf['service_base_name'],
+                                                                      edge_conf['project_name'],
+                                                                      edge_conf['endpoint_name'])
+        tag = {"Key": edge_conf['tag_name'],
+               "Value": "{}-{}-{}-subnet".format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                                 edge_conf['endpoint_name'])}
+        edge_conf['private_subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        edge_conf['network_type'] = os.environ['conf_network_type']
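+        # On private networks the Edge gets no public IP, so the private address doubles as the public one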
+        if edge_conf['network_type'] == 'public':
+            edge_conf['edge_public_ip'] = dlab.meta_lib.get_instance_ip_address(
+                edge_conf['tag_name'], edge_conf['instance_name']).get('Public')
+            edge_conf['edge_private_ip'] = dlab.meta_lib.get_instance_ip_address(
+                edge_conf['tag_name'], edge_conf['instance_name']).get('Private')
+        elif edge_conf['network_type'] == 'private':
+            edge_conf['edge_private_ip'] = dlab.meta_lib.get_instance_ip_address(
+                edge_conf['tag_name'], edge_conf['instance_name']).get('Private')
+            edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
+        edge_conf['vpc1_cidrs'] = dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_vpc_id'])
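+        # A second, notebook-specific VPC is optional; a KeyError here means single-VPC mode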
+        try:
+            edge_conf['vpc2_cidrs'] = dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_notebook_vpc_id'])
+            edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs'] + edge_conf['vpc2_cidrs']))
+        except KeyError:
+            edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs']))
+
+        edge_conf['allowed_ip_cidr'] = list()
+        for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+            edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+
+        edge_conf['instance_hostname'] = dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'],
+                                                                             edge_conf['instance_name'])
+        edge_conf['keyfile_name'] = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
+
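+        # Collect SANs for step-ca issued certificates: always the private IP, plus hostname and public IP on public networks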
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            edge_conf['step_cert_sans'] = ' --san {0} '.format(edge_conf['edge_private_ip'])
+            if edge_conf['network_type'] == 'public':
+                edge_conf['step_cert_sans'] += ' --san {0} --san {1}'.format(
+                    dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name']),
+                    edge_conf['edge_public_ip'])
+        else:
+            edge_conf['step_cert_sans'] = ''
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            edge_conf['initial_user'] = 'ubuntu'
+            edge_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            edge_conf['initial_user'] = 'ec2-user'
+            edge_conf['sudo_group'] = 'wheel'
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        sys.exit(1)
 
+    try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             edge_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            edge_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -109,37 +146,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING PREREQUISITES]')
         logging.info('[INSTALLING PREREQUISITES]')
         params = "--hostname {} --keyfile {} --user {} --region {}".\
-            format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['aws_region'])
+            format(edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+                   os.environ['aws_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -147,88 +171,88 @@
         logging.info('[INSTALLING HTTP PROXY]')
         additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
                              "template_file": "/root/templates/squid.conf",
-                             "project_name": os.environ['project_name'],
+                             "project_name": edge_conf['project_name'],
                              "ldap_host": os.environ['ldap_hostname'],
                              "ldap_dn": os.environ['ldap_dn'],
                              "ldap_user": os.environ['ldap_service_username'],
                              "ldap_password": os.environ['ldap_service_password'],
                              "vpc_cidrs": edge_conf['vpc_cidrs'],
                              "allowed_ip_cidr": edge_conf['allowed_ip_cidr']}
-        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
-                 .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('configure_http_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing http proxy.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing http proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
 
     try:
         print('[INSTALLING USERs KEY]')
         logging.info('[INSTALLING USERs KEY]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": edge_conf['project_name'],
                              "user_keydir": os.environ['conf_key_dir'],
                              "user_key": edge_conf['user_key']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key." + str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing users key." + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING NGINX REVERSE PROXY]')
         logging.info('[INSTALLING NGINX REVERSE PROXY]')
-        params = "--hostname {} --keyfile {} --user {}" \
-            .format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'])
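+        # A fresh random secret for the project's Keycloak client; it is passed to both nginx and configure_keycloak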
+        edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
+        params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
+                 "--step_cert_sans '{}' ".format(
+                  edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+                  '{}-{}-{}'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                    edge_conf['endpoint_name']),
+                  edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
         try:
             local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
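+        # Register this Edge node as a Keycloak client using the secret generated above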
+        keycloak_params = "--service_base_name {} --keycloak_auth_server_url {} --keycloak_realm_name {} " \
+                          "--keycloak_user {} --keycloak_user_password {} --keycloak_client_secret {} " \
+                          "--edge_public_ip {} --hostname {} --project_name {} --endpoint_name {} ".format(
+                           edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
+                           os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
+                           os.environ['keycloak_user_password'], edge_conf['keycloak_client_secret'],
+                           edge_conf['instance_hostname'], edge_conf['instance_hostname'], edge_conf['project_name'],
+                           edge_conf['endpoint_name'])
+        try:
+            local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
+        except:
+            traceback.print_exc()
+            raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing nginx reverse proxy." + str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing nginx reverse proxy." + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
-        print("Hostname: {}".format(instance_hostname))
+        print("Hostname: {}".format(edge_conf['instance_hostname']))
         print("Public IP: {}".format(edge_conf['edge_public_ip']))
         print("Private IP: {}".format(edge_conf['edge_private_ip']))
-        print("Instance ID: {}".format(get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name'])))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(edge_conf['tag_name'],
+                                                                          edge_conf['instance_name'])))
         print("Key name: {}".format(edge_conf['key_name']))
         print("Bucket name: {}".format(edge_conf['bucket_name']))
         print("Shared bucket name: {}".format(edge_conf['shared_bucket_name']))
@@ -237,10 +261,10 @@
         print("Edge SG: {}".format(edge_conf['edge_security_group_name']))
         print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr']))
         with open("/root/result.json", 'w') as result:
-            res = {"hostname": instance_hostname,
+            res = {"hostname": edge_conf['instance_hostname'],
                    "public_ip": edge_conf['edge_public_ip'],
                    "ip": edge_conf['edge_private_ip'],
-                   "instance_id": get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name']),
+                   "instance_id": dlab.meta_lib.get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name']),
                    "key_name": edge_conf['key_name'],
                    "user_own_bicket_name": edge_conf['bucket_name'],
                    "shared_bucket_name": edge_conf['shared_bucket_name'],
@@ -251,13 +275,13 @@
                    "edge_sg": edge_conf['edge_security_group_name'],
                    "notebook_subnet": edge_conf['private_subnet_cidr'],
                    "full_edge_conf": edge_conf,
-                   "project_name": os.environ['project_name'],
+                   "project_name": edge_conf['project_name'],
                    "@class": "com.epam.dlab.dto.aws.edge.EdgeInfoAws",
                    "Action": "Create new EDGE server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
+        sys.exit(1)
 
-    sys.exit(0)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_start.py b/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
index 2449cd7..a9f856a 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
@@ -21,9 +21,14 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
+import logging
+import os
+import json
 
 
 if __name__ == "__main__":
@@ -35,29 +40,28 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['project_name'] = os.environ['project_name']
     edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
+    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
 
     logging.info('[START EDGE]')
     print('[START EDGE]')
     try:
-        start_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+        dlab.actions_lib.start_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to start edge.", str(err))
+        dlab.fab.append_result("Failed to start edge.", str(err))
         sys.exit(1)
 
     try:
-        instance_hostname = get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
-        addresses = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name'])
+        instance_hostname = dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
+        addresses = dlab.meta_lib.get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name'])
         ip_address = addresses.get('Private')
         public_ip_address = addresses.get('Public')
         print('[SUMMARY]')
@@ -74,7 +78,6 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_status.py b/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
index 86ff6e3..d8bd92e 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
@@ -23,20 +23,26 @@
 
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import logging
+import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Getting statuses of DLAB resources')
 
     try:
@@ -49,6 +55,5 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to collect necessary information.", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to collect necessary information.", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py b/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
index 3f99b36..3948781 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
+import os
+import logging
+import json
 
 
 if __name__ == "__main__":
@@ -35,23 +39,22 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['project_name'] = os.environ['project_name']
     edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
+    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
 
     logging.info('[STOP EDGE]')
     print('[STOP EDGE]')
     try:
-        stop_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+        dlab.actions_lib.stop_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
     except Exception as err:
-        append_result("Failed to stop edge.", str(err))
+        dlab.fab.append_result("Failed to stop edge.", str(err))
         sys.exit(1)
 
     try:
@@ -60,7 +63,6 @@
                    "Action": "Stop edge server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
index 6b17dec..cc53b22 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -42,67 +44,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
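+        # Image names are project-scoped unless shared images are enabled for the whole endpoint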
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -110,9 +122,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -120,36 +131,34 @@
         logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
         print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
         additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
-        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
-                    notebook_config['dlab_ssh_user'])
+        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}".format(
+            instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
     try:
         logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
         print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
-        params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".\
-            format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'],
-                   edge_instance_private_ip)
+        params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".format(
+            instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'],
+            edge_instance_private_ip)
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring jupiter and all dependencies
@@ -164,7 +173,7 @@
                  "--os_user {5} " \
                  "--scala_version {6} " \
                  "--r_mirror {7} " \
-                 "--ip_adress {8} " \
+                 "--ip_address {8} " \
                  "--exploratory_name {9} " \
                  "--edge_ip {10}".\
             format(instance_hostname,
@@ -184,9 +193,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -199,12 +207,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -216,12 +223,11 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -237,9 +243,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -264,92 +269,103 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
-                    os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                    os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("Image name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Jupyter URL: {}".format(jupyter_dns_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
-    print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("Image name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Jupyter URL: {}".format(jupyter_dns_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_access_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
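+        # /root/result.json is picked up by the provisioning engine to register the notebook and its URLs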
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyter_dataengine-service_create_configs.py b/infrastructure-provisioning/src/general/scripts/aws/jupyter_dataengine-service_create_configs.py
index 9af6935..5f17c07 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/jupyter_dataengine-service_create_configs.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyter_dataengine-service_create_configs.py
@@ -42,6 +42,8 @@
 parser.add_argument('--dry_run', type=str, default='false')
 parser.add_argument('--emr_version', type=str, default='')
 parser.add_argument('--spark_version', type=str, default='')
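+# scala/r versions are now resolved by the calling script (jupyter_install_dataengine-service_kernels.py) and passed in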
+parser.add_argument('--scala_version', type=str, default='')
+parser.add_argument('--r_version', type=str, default='')
 parser.add_argument('--hadoop_version', type=str, default='')
 parser.add_argument('--region', type=str, default='')
 parser.add_argument('--excluded_lines', type=str, default='')
@@ -50,7 +52,6 @@
 parser.add_argument('--pip_mirror', type=str, default='')
 parser.add_argument('--numpy_version', type=str, default='')
 parser.add_argument('--application', type=str, default='')
-parser.add_argument('--r_enabled', type=str, default='')
 args = parser.parse_args()
 
 emr_dir = '/opt/' + args.emr_version + '/jars/'
@@ -64,14 +65,13 @@
     local('mkdir -p {}/r_{}/'.format(kernels_dir, args.cluster_name))
     kernel_path = "{}/r_{}/kernel.json".format(kernels_dir, args.cluster_name)
     template_file = "/tmp/r_dataengine-service_template.json"
-    r_version = local("R --version | awk '/version / {print $3}'", capture = True)
 
     with open(template_file, 'r') as f:
         text = f.read()
     text = text.replace('CLUSTER_NAME', args.cluster_name)
     text = text.replace('SPARK_PATH', spark_path)
     text = text.replace('SPARK_VERSION', 'Spark-' + args.spark_version)
-    text = text.replace('R_KERNEL_VERSION', 'R-{}'.format(str(r_version)))
+    text = text.replace('R_KERNEL_VERSION', 'R-{}'.format(args.r_version))
     text = text.replace('DATAENGINE-SERVICE_VERSION', args.emr_version)
     if 'emr-4.' in args.emr_version:
         text = text.replace('YARN_CLI_TYPE', 'yarn-client')
@@ -85,7 +85,7 @@
 
 def toree_kernel(args):
     spark_path = '/opt/' + args.emr_version + '/' + args.cluster_name + '/spark/'
-    scala_version = local('scala -e "println(scala.util.Properties.versionNumberString)"', capture=True)
+    scala_version = local("Spark-submit --version 2>&1 | awk '/Scala version / {gsub(/,/, \"\"); print $4}'")
     if args.emr_version == 'emr-4.3.0' or args.emr_version == 'emr-4.6.0' or args.emr_version == 'emr-4.8.0':
         local('mkdir -p ' + kernels_dir + 'toree_' + args.cluster_name + '/')
         kernel_path = kernels_dir + "toree_" + args.cluster_name + "/kernel.json"
@@ -96,7 +96,7 @@
         text = text.replace('SPARK_VERSION', 'Spark-' + args.spark_version)
         text = text.replace('SPARK_PATH', spark_path)
         text = text.replace('DATAENGINE-SERVICE_VERSION', args.emr_version)
-        text = text.replace('SCALA_VERSION', scala_version)
+        text = text.replace('SCALA_VERSION', args.scala_version)
         with open(kernel_path, 'w') as f:
             f.write(text)
         local('touch /tmp/kernel_var.json')
@@ -117,7 +117,7 @@
         text = text.replace('SPARK_PATH', spark_path)
         text = text.replace('OS_USER', args.os_user)
         text = text.replace('DATAENGINE-SERVICE_VERSION', args.emr_version)
-        text = text.replace('SCALA_VERSION', scala_version)
+        text = text.replace('SCALA_VERSION', args.scala_version)
         with open(kernel_path, 'w') as f:
             f.write(text)
         local('touch /tmp/kernel_var.json')
@@ -142,19 +142,19 @@
     breeze_tmp_dir = '/tmp/breeze_tmp_emr/'
     local('sudo mkdir -p ' + new_jars_directory_path)
     local('mkdir -p ' + breeze_tmp_dir)
-    local('wget http://central.maven.org/maven2/org/scalanlp/breeze_2.11/0.12/breeze_2.11-0.12.jar -O ' +
+    local('wget https://repo1.maven.org/maven2/org/scalanlp/breeze_2.11/0.12/breeze_2.11-0.12.jar -O ' +
           breeze_tmp_dir + 'breeze_2.11-0.12.jar')
-    local('wget http://central.maven.org/maven2/org/scalanlp/breeze-natives_2.11/0.12/breeze-natives_2.11-0.12.jar -O '
+    local('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-natives_2.11/0.12/breeze-natives_2.11-0.12.jar -O '
           + breeze_tmp_dir + 'breeze-natives_2.11-0.12.jar')
-    local('wget http://central.maven.org/maven2/org/scalanlp/breeze-viz_2.11/0.12/breeze-viz_2.11-0.12.jar -O ' +
+    local('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-viz_2.11/0.12/breeze-viz_2.11-0.12.jar -O ' +
           breeze_tmp_dir + 'breeze-viz_2.11-0.12.jar')
-    local('wget http://central.maven.org/maven2/org/scalanlp/breeze-macros_2.11/0.12/breeze-macros_2.11-0.12.jar -O ' +
+    local('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-macros_2.11/0.12/breeze-macros_2.11-0.12.jar -O ' +
           breeze_tmp_dir + 'breeze-macros_2.11-0.12.jar')
-    local('wget http://central.maven.org/maven2/org/scalanlp/breeze-parent_2.11/0.12/breeze-parent_2.11-0.12.jar -O ' +
+    local('wget https://repo1.maven.org/maven2/org/scalanlp/breeze-parent_2.11/0.12/breeze-parent_2.11-0.12.jar -O ' +
           breeze_tmp_dir + 'breeze-parent_2.11-0.12.jar')
-    local('wget http://central.maven.org/maven2/org/jfree/jfreechart/1.0.19/jfreechart-1.0.19.jar -O ' +
+    local('wget https://repo1.maven.org/maven2/org/jfree/jfreechart/1.0.19/jfreechart-1.0.19.jar -O ' +
           breeze_tmp_dir + 'jfreechart-1.0.19.jar')
-    local('wget http://central.maven.org/maven2/org/jfree/jcommon/1.0.24/jcommon-1.0.24.jar -O ' +
+    local('wget https://repo1.maven.org/maven2/org/jfree/jcommon/1.0.24/jcommon-1.0.24.jar -O ' +
           breeze_tmp_dir + 'jcommon-1.0.24.jar')
     local('wget --no-check-certificate https://brunelvis.org/jar/spark-kernel-brunel-all-2.3.jar -O ' +
           breeze_tmp_dir + 'spark-kernel-brunel-all-2.3.jar')
@@ -163,8 +163,6 @@
           """\/jars\/usr\/other\/*/' """ + spark_defaults_path + """" """)
 
 
-
-
 if __name__ == "__main__":
     if args.dry_run == 'true':
         parser.print_help()
@@ -177,7 +175,8 @@
         pyspark_kernel(kernels_dir, args.emr_version, args.cluster_name, args.spark_version, args.bucket,
                        args.project_name, args.region, args.os_user, args.application, args.pip_mirror, args.numpy_version)
         toree_kernel(args)
-        if args.r_enabled == 'true':
+        if args.r_version != 'false':
+            print('R version: {}'.format(args.r_version))
             r_kernel(args)
         spark_defaults(args)
         configuring_notebook(args.emr_version)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyter_install_dataengine-service_kernels.py b/infrastructure-provisioning/src/general/scripts/aws/jupyter_install_dataengine-service_kernels.py
index 6900a6a..fb29f0a 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/jupyter_install_dataengine-service_kernels.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyter_install_dataengine-service_kernels.py
@@ -79,8 +79,22 @@
     hadoop_version = get_hadoop_version(args.cluster_name)
     r_enabled = os.environ['notebook_r_enabled']
     numpy_version = os.environ['notebook_numpy_version']
-    sudo("/usr/bin/python /usr/local/bin/jupyter_dataengine-service_create_configs.py --bucket " + args.bucket +
-         " --cluster_name " + args.cluster_name + " --emr_version " + args.emr_version + " --spark_version " +
-         spark_version + " --hadoop_version " + hadoop_version + " --region " + args.region + " --excluded_lines '"
-         + args.emr_excluded_spark_properties + "' --project_name " + args.project_name + " --os_user " + args.os_user +
-         " --pip_mirror " + args.pip_mirror + " --numpy_version " + numpy_version + " --application " + args.application + " --r_enabled " + r_enabled)
+    s3_client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=args.region)
+    s3_client.download_file(args.bucket, args.project_name + '/' + args.cluster_name + '/scala_version',
+                            '/tmp/scala_version')
+    with open('/tmp/scala_version') as f:
+        scala_version = str(f.read()).rstrip()
+        print(scala_version)
+    if r_enabled == 'true':
+        s3_client.download_file(args.bucket, args.project_name + '/' + args.cluster_name + '/r_version', '/tmp/r_version')
+        with open('/tmp/r_version') as g:
+            r_version = str(g.read()).rstrip()
+            print(r_version)
+    else:
+        r_version = 'false'
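+    # 'false' acts as a sentinel: jupyter_dataengine-service_create_configs.py skips the R kernel when it sees it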
+    sudo("/usr/bin/python /usr/local/bin/jupyter_dataengine-service_create_configs.py --bucket " + args.bucket
+         + " --cluster_name " + args.cluster_name + " --emr_version " + args.emr_version + " --spark_version "
+         + spark_version + " --scala_version " + scala_version + " --r_version " + r_version + " --hadoop_version "
+         + hadoop_version + " --region " + args.region + " --excluded_lines '" + args.emr_excluded_spark_properties
+         + "' --project_name " + args.project_name + " --os_user " + args.os_user + " --pip_mirror "
+         + args.pip_mirror + " --numpy_version " + numpy_version + " --application " + args.application)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
new file mode 100644
index 0000000..d828df5
--- /dev/null
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+import json
+import sys
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import argparse
+from fabric.api import *
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--uuid', type=str, default='')
+args = parser.parse_args()
+
+
+if __name__ == "__main__":
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        if os.environ['conf_os_family'] == 'debian':
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
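+        # Debian AMIs bootstrap with 'ubuntu' in the 'sudo' group; RedHat AMIs with 'ec2-user' in 'wheel'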
+
+        logging.info('[CREATING DLAB SSH USER]')
+        print('[CREATING DLAB SSH USER]')
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
+            (instance_hostname,  "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+             notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
+
+        try:
+            local("~/scripts/{}.py {}".format('create_ssh_user', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    # configuring proxy on Notebook instance
+    try:
+        logging.info('[CONFIGURE PROXY ON JUPYTERLAB INSTANCE]')
+        print('[CONFIGURE PROXY ON JUPYTERLAB INSTANCE]')
+        additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
+        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_proxy', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    # updating repositories & installing python packages
+    try:
+        logging.info('[INSTALLING PREREQUISITES TO JUPYTERLAB NOTEBOOK INSTANCE]')
+        print('[INSTALLING PREREQUISITES TO JUPYTERLAB NOTEBOOK INSTANCE]')
+        params = "--hostname {} --keyfile {} --user {} --region {}".\
+            format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'])
+        try:
+            local("~/scripts/{}.py {}".format('install_prerequisites', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    # installing and configuring jupiter and all dependencies
+    try:
+        logging.info('[CONFIGURE JUPYTERLAB NOTEBOOK INSTANCE]')
+        print('[CONFIGURE JUPYTERLAB NOTEBOOK INSTANCE]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--edge_ip {} " \
+                 "--region {} " \
+                 "--spark_version {} " \
+                 "--hadoop_version {} " \
+                 "--os_user {} " \
+                 "--scala_version {} " \
+                 "--r_mirror {} " \
+                 "--ip_address {} " \
+                 "--exploratory_name {}".\
+            format(instance_hostname,
+                   keyfile_name,
+                   edge_ip,
+                   os.environ['aws_region'],
+                   os.environ['notebook_spark_version'],
+                   os.environ['notebook_hadoop_version'],
+                   notebook_config['dlab_ssh_user'],
+                   os.environ['notebook_scala_version'],
+                   os.environ['notebook_r_mirror'],
+                   notebook_config['ip_address'],
+                   notebook_config['exploratory_name'])
+        try:
+            local("~/scripts/{}.py {}".format('configure_jupyterlab_node', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure jupyterlab.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        print('[INSTALLING USERs KEY]')
+        logging.info('[INSTALLING USERs KEY]')
+        additional_config = {"user_keyname": notebook_config['user_keyname'],
+                             "user_keydir": os.environ['conf_key_dir']}
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('install_user_key', params))
+        except:
+            dlab.fab.append_result("Failed installing users key")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        print('[SETUP USER GIT CREDENTIALS]')
+        logging.info('[SETUP USER GIT CREDENTIALS]')
+        params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
+            .format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
+        try:
+            local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
+            local("~/scripts/{}.py {}".format('manage_git_creds', params))
+        except:
+            dlab.fab.append_result("Failed setup git credentials")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        logging.info('[POST CONFIGURING PROCESS]')
+        print('[POST CONFIGURING PROCESS]')
+        if notebook_config['notebook_image_name'] not in [notebook_config['expected_image_name'], 'None']:
+            params = "--hostname {} --keyfile {} --os_user {} --nb_tag_name {} --nb_tag_value {}" \
+                .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
+                        notebook_config['tag_name'], notebook_config['instance_name'])
+            try:
+                local("~/scripts/{}.py {}".format('common_remove_remote_kernels', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        additional_info = {
+            'instance_hostname': instance_hostname,
+            'tensor': False
+        }
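+        # 'tensor': False presumably tells the reverse proxy template not to add TensorBoard locations,
+        # which JupyterLab does not need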
+        params = "--edge_hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} " \
+                 "--type {} " \
+                 "--exploratory_name {} " \
+                 "--additional_info '{}'"\
+            .format(edge_instance_hostname,
+                    keyfile_name,
+                    notebook_config['dlab_ssh_user'],
+                    'jupyter',
+                    notebook_config['exploratory_name'],
+                    json.dumps(additional_info))
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
+        except:
+            dlab.fab.append_result("Failed edge reverse proxy template")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    if notebook_config['shared_image_enabled'] == 'true':
+        try:
+            print('[CREATING AMI]')
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
+            if ami_id == '':
+                print("Looks like it's first time we configure notebook server. Creating image.")
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
+                if image_id != '':
+                    print("Image was successfully created. It's ID is {}".format(image_id))
+        except Exception as err:
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            sys.exit(1)
+
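+    # JupyterLab runs in a Docker container on the notebook instance, so Docker itself must use the EDGE proxy as well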
+    try:
+        print('[CONFIGURING PROXY FOR DOCKER]')
+        logging.info('[CONFIGURING PROXY FOR DOCKER]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   keyfile_name,
+                   notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/configure_proxy_for_docker.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
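+    # jupyterlab_container_start.py is expected to launch the pre-built JupyterLab container on the instance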
+    try:
+        print('[STARTING JUPYTER CONTAINER]')
+        logging.info('[STARTING JUPYTER CONTAINER]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   keyfile_name,
+                   notebook_config['dlab_ssh_user'])
+        try:
+           local("~/scripts/jupyterlab_container_start.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to start Jupyter container.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "http://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                             notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "http://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("Image name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("JupyterLab URL: {}".format(jupyter_ip_url))
+        print("JupyterLab URL: {}".format(jupyter_dns_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "JupyterLab",
+                        "url": jupyter_notebook_acces_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_acces_url},
+                       #{"description": "JupyterLab (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
index a5e8a79..9d44ba5 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
@@ -22,12 +22,16 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
 import traceback
 import boto3
+import logging
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -38,158 +42,181 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    create_aws_config_files()
-    print('Generating infrastructure names and tags')
-    project_conf = dict()
-    project_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    project_conf['endpoint_name'] = os.environ['endpoint_name']
-    project_conf['endpoint_tag'] = os.environ['endpoint_name']
-    project_conf['project_name'] = os.environ['project_name']
-    project_conf['project_tag'] = os.environ['project_name']
-    project_conf['key_name'] = os.environ['conf_key_name']
-    project_conf['public_subnet_id'] = os.environ['aws_subnet_id']
-    project_conf['vpc_id'] = os.environ['aws_vpc_id']
-    project_conf['region'] = os.environ['aws_region']
-    project_conf['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
-    project_conf['instance_size'] = os.environ['aws_edge_instance_size']
-    project_conf['sg_ids'] = os.environ['aws_security_groups_ids']
-    project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
-                                                                os.environ['project_name'], os.environ['endpoint_name'])
-    project_conf['tag_name'] = '{}-Tag'.format(project_conf['service_base_name'])
-    project_conf['bucket_name_tag'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
-                                                                  project_conf['project_name'],
-                                                                  project_conf['endpoint_name'])
-    project_conf['bucket_name'] = project_conf['bucket_name_tag'].lower().replace('_', '-')
-    project_conf['ssn_bucket_name'] = '{}-ssn-bucket'.format(
-        project_conf['service_base_name']).lower().replace('_', '-')
-    project_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(
-        project_conf['service_base_name'], project_conf['endpoint_tag']).lower().replace('_', '-')
-    project_conf['edge_role_name'] = '{}-{}-edge-Role'.format(
-        project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['edge_role_profile_name'] = '{}-{}-edge-Profile'.format(
-        project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['edge_policy_name'] = '{}-{}-edge-Policy'.format(
-        project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['edge_security_group_name'] = '{}-sg'.format(project_conf['edge_instance_name'])
-    project_conf['notebook_instance_name'] = '{}-{}-nb'.format(project_conf['service_base_name'],
-                                                            os.environ['project_name'])
-    project_conf['dataengine_instances_name'] = '{}-{}-dataengine' \
-        .format(project_conf['service_base_name'], os.environ['project_name'])
-    project_conf['notebook_dataengine_role_name'] = '{}-{}-nb-de-Role' \
-        .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['notebook_dataengine_policy_name'] = '{}-{}-nb-de-Policy' \
-        .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['notebook_dataengine_role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['notebook_security_group_name'] = '{}-{}-nb-sg'.format(project_conf['service_base_name'],
-                                                                     os.environ['project_name'])
-    project_conf['private_subnet_prefix'] = os.environ['aws_private_subnet_prefix']
-    project_conf['private_subnet_name'] = '{0}-{1}-subnet'.format(project_conf['service_base_name'],
-                                                               os.environ['project_name'])
-    project_conf['dataengine_master_security_group_name'] = '{}-{}-dataengine-master-sg' \
-        .format(project_conf['service_base_name'], os.environ['project_name'])
-    project_conf['dataengine_slave_security_group_name'] = '{}-{}-dataengine-slave-sg' \
-        .format(project_conf['service_base_name'], os.environ['project_name'])
-    project_conf['allowed_ip_cidr'] = list()
-    for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-        project_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ','')})
-    project_conf['network_type'] = os.environ['conf_network_type']
-    project_conf['all_ip_cidr'] = '0.0.0.0/0'
-    project_conf['zone'] = os.environ['aws_region'] + os.environ['aws_zone']
-    project_conf['elastic_ip_name'] = '{0}-{1}-edge-EIP'.format(project_conf['service_base_name'],
-                                                             os.environ['project_name'])
-    project_conf['provision_instance_ip'] = None
     try:
-        project_conf['provision_instance_ip'] = get_instance_ip_address(
-            project_conf['tag_name'], '{0}-{1}-endpoint'.format(project_conf['service_base_name'],
-                                                                os.environ['endpoint_name'])).get('Private') + "/32"
-    except:
-        project_conf['provision_instance_ip'] = get_instance_ip_address(project_conf['tag_name'], '{0}-ssn'.format(
-            project_conf['service_base_name'])).get('Private') + "/32"
-    if 'aws_user_predefined_s3_policies' not in os.environ:
-        os.environ['aws_user_predefined_s3_policies'] = 'None'
-
-    try:
-        if os.environ['conf_user_subnets_range'] == '':
-            raise KeyError
-    except KeyError:
-        os.environ['conf_user_subnets_range'] = ''
-
-    # FUSE in case of absence of user's key
-    try:
-        project_conf['user_key'] = os.environ['key']
+        dlab.actions_lib.create_aws_config_files()
+        print('Generating infrastructure names and tags')
+        project_conf = dict()
+        project_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        project_conf['endpoint_name'] = os.environ['endpoint_name']
+        project_conf['endpoint_tag'] = project_conf['endpoint_name']
+        project_conf['project_name'] = os.environ['project_name']
+        project_conf['project_tag'] = project_conf['project_name']
+        project_conf['key_name'] = os.environ['conf_key_name']
+        project_conf['public_subnet_id'] = os.environ['aws_subnet_id']
+        project_conf['vpc_id'] = os.environ['aws_vpc_id']
+        project_conf['region'] = os.environ['aws_region']
+        project_conf['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+            os.environ['conf_os_family'])])
+        project_conf['instance_size'] = os.environ['aws_edge_instance_size']
+        project_conf['sg_ids'] = os.environ['aws_security_groups_ids']
+        project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
+                                                                    project_conf['project_name'],
+                                                                    project_conf['endpoint_name'])
+        project_conf['tag_name'] = '{}-tag'.format(project_conf['service_base_name'])
+        project_conf['bucket_name_tag'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+                                                                      project_conf['project_name'],
+                                                                      project_conf['endpoint_name'])
+        project_conf['bucket_name'] = project_conf['bucket_name_tag'].lower().replace('_', '-')
+        project_conf['shared_bucket_name_tag'] = '{0}-{1}-shared-bucket'.format(
+            project_conf['service_base_name'], project_conf['endpoint_tag'])
+        project_conf['shared_bucket_name'] = project_conf['shared_bucket_name_tag'].lower().replace('_', '-')
+        project_conf['edge_role_name'] = '{}-{}-{}-edge-role'.format(project_conf['service_base_name'],
+                                                                     project_conf['project_name'],
+                                                                     project_conf['endpoint_name'])
+        project_conf['edge_role_profile_name'] = '{}-{}-{}-edge-profile'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['edge_policy_name'] = '{}-{}-{}-edge-policy'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['edge_security_group_name'] = '{}-{}-{}-edge-sg'.format(project_conf['service_base_name'],
+                                                                             project_conf['project_name'],
+                                                                             project_conf['endpoint_name'])
+        project_conf['notebook_instance_name'] = '{}-{}-{}-nb'.format(project_conf['service_base_name'],
+                                                                      project_conf['project_name'],
+                                                                      project_conf['endpoint_name'])
+        project_conf['dataengine_instances_name'] = '{}-{}-{}-de'.format(project_conf['service_base_name'],
+                                                                         project_conf['project_name'],
+                                                                         project_conf['endpoint_name'])
+        project_conf['notebook_dataengine_role_name'] = '{}-{}-{}-nb-de-role'.format(project_conf['service_base_name'],
+                                                                                     project_conf['project_name'],
+                                                                                     project_conf['endpoint_name'])
+        project_conf['notebook_dataengine_policy_name'] = '{}-{}-{}-nb-de-policy'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
+                                                                               project_conf['project_name'],
+                                                                               project_conf['endpoint_name'])
+        project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+        project_conf['private_subnet_name'] = '{0}-{1}-{2}-subnet'.format(project_conf['service_base_name'],
+                                                                          project_conf['project_name'],
+                                                                          project_conf['endpoint_name'])
+        project_conf['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['allowed_ip_cidr'] = list()
+        for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+            project_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ', '')})
+        project_conf['network_type'] = os.environ['conf_network_type']
+        project_conf['all_ip_cidr'] = '0.0.0.0/0'
+        project_conf['zone'] = os.environ['aws_region'] + os.environ['aws_zone']
+        project_conf['elastic_ip_name'] = '{0}-{1}-{2}-edge-static-ip'.format(project_conf['service_base_name'],
+                                                                              project_conf['project_name'],
+                                                                              project_conf['endpoint_name'])
+        project_conf['provision_instance_ip'] = None
+        project_conf['local_endpoint'] = False
         try:
-            local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
-                                                    project_conf['project_name']))
+            project_conf['provision_instance_ip'] = '{}/32'.format(dlab.meta_lib.get_instance_ip_address(
+                project_conf['tag_name'], '{0}-{1}-endpoint'.format(project_conf['service_base_name'],
+                                                                    project_conf['endpoint_name'])).get('Private'))
         except:
-            print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
-    except KeyError:
-        print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
+            project_conf['provision_instance_ip'] = '{}/32'.format(dlab.meta_lib.get_instance_ip_address(
+                project_conf['tag_name'], '{0}-ssn'.format(project_conf['service_base_name'])).get('Private'))
+            project_conf['local_endpoint'] = True
+        if 'aws_user_predefined_s3_policies' not in os.environ:
+            os.environ['aws_user_predefined_s3_policies'] = 'None'
+
+        try:
+            if os.environ['conf_user_subnets_range'] == '':
+                raise KeyError
+        except KeyError:
+            os.environ['conf_user_subnets_range'] = ''
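
The two normalizations above (defaulting aws_user_predefined_s3_policies and the raise-KeyError dance around conf_user_subnets_range) boil down to the following; a minimal equivalent sketch, shown only for clarity:

import os

# Default the predefined-policies variable, then make sure the subnets
# range key exists even when it is absent from the environment.
os.environ.setdefault('aws_user_predefined_s3_policies', 'None')
if os.environ.get('conf_user_subnets_range', '') == '':
    os.environ['conf_user_subnets_range'] = ''
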
+
+        # User's public key is required (e.g. for FUSE mounts); abort if it was not uploaded
+        try:
+            project_conf['user_key'] = os.environ['key']
+            try:
+                local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
+                                                        project_conf['project_name']))
+            except:
+                print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
+        except KeyError:
+            print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
+            sys.exit(1)
+
+        print("Will create exploratory environment with edge node as access point as following: {}".
+              format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+        logging.info(json.dumps(project_conf))
+
+        if 'conf_additional_tags' in os.environ:
+            project_conf['bucket_additional_tags'] = ';' + os.environ['conf_additional_tags']
+            os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
+                                                 ';project_tag:{0};endpoint_tag:{1};'.format(
+                                                     project_conf['project_tag'], project_conf['endpoint_tag'])
+        else:
+            project_conf['bucket_additional_tags'] = ''
+            os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(project_conf['project_tag'],
+                                                                                           project_conf['endpoint_tag'])
+        print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
-    print("Will create exploratory environment with edge node as access point as following: {}".
-          format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
-    logging.info(json.dumps(project_conf))
-
-    if 'conf_additional_tags' in os.environ:
-        os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
-                                             ';project_tag:{0};endpoint_tag:{1};'.format(
-                                                 project_conf['project_tag'], project_conf['endpoint_tag'])
-    else:
-        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(project_conf['project_tag'],
-                                                                                       project_conf['endpoint_tag'])
-    print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
-
-    # attach project_tag and endpoint_tag to endpoint
-    try:
-        endpoint_id = get_instance_by_name(project_conf['tag_name'], project_conf['endpoint_name'])
-        print("Endpoint id: " + endpoint_id)
-        ec2 = boto3.client('ec2')
-        ec2.create_tags(Resources=[endpoint_id], Tags=[{'Key': 'project_tag', 'Value': project_conf['project_tag']},
-                                                       {'Key': 'endpoint_tag', 'Value': project_conf['endpoint_tag']}])
-    except Exception as err:
-        print("Failed to attach Project tag to Endpoint", str(err))
-#        traceback.print_exc()
-#        sys.exit(1)
+    if not project_conf['local_endpoint']:
+        # attach project_tag and endpoint_tag to endpoint
+        try:
+            endpoint_id = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'], '{0}-{1}-endpoint'.format(
+                project_conf['service_base_name'], project_conf['endpoint_name']))
+            print("Endpoint id: " + endpoint_id)
+            ec2 = boto3.client('ec2')
+            ec2.create_tags(Resources=[endpoint_id], Tags=[
+                {'Key': 'project_tag', 'Value': project_conf['project_tag']},
+                {'Key': 'endpoint_tag', 'Value': project_conf['endpoint_tag']}])
+        except Exception as err:
+            print("Failed to attach Project tag to Endpoint", str(err))
+            traceback.print_exc()
+            sys.exit(1)
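
An optional sanity check, not part of the script: confirm with describe_tags that both tags actually landed on the endpoint instance (this reuses the endpoint_id computed above):

import boto3

ec2 = boto3.client('ec2')
response = ec2.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [endpoint_id]}])
# Print only the two tags this step is responsible for.
print([t for t in response['Tags'] if t['Key'] in ('project_tag', 'endpoint_tag')])
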
 
     try:
         project_conf['vpc2_id'] = os.environ['aws_vpc2_id']
-        project_conf['tag_name'] = '{}-secondary-Tag'.format(project_conf['service_base_name'])
+        project_conf['tag_name'] = '{}-secondary-tag'.format(project_conf['service_base_name'])
     except KeyError:
         project_conf['vpc2_id'] = project_conf['vpc_id']
 
     try:
         logging.info('[CREATE SUBNET]')
         print('[CREATE SUBNET]')
         params = "--vpc_id '{}' --infra_tag_name {} --infra_tag_value {} --prefix {} " \
                  "--user_subnets_range '{}' --subnet_name {} --zone {}".format(
-            project_conf['vpc2_id'], project_conf['tag_name'], project_conf['service_base_name'],
-            project_conf['private_subnet_prefix'], os.environ['conf_user_subnets_range'],
-            project_conf['private_subnet_name'],
-            project_conf['zone'])
+                  project_conf['vpc2_id'], project_conf['tag_name'], project_conf['service_base_name'],
+                  project_conf['private_subnet_prefix'], os.environ['conf_user_subnets_range'],
+                  project_conf['private_subnet_name'],
+                  project_conf['zone'])
         try:
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create subnet.", str(err))
+        dlab.fab.append_result("Failed to create subnet.", str(err))
         sys.exit(1)
 
     tag = {"Key": project_conf['tag_name'],
-           "Value": "{0}-{1}-subnet".format(project_conf['service_base_name'], project_conf['project_name'])}
-    project_conf['private_subnet_cidr'] = get_subnet_by_tag(tag)
-    subnet_id = get_subnet_by_cidr(project_conf['private_subnet_cidr'])
-    print('subnet id: {}'.format(subnet_id))
-
+           "Value": "{0}-{1}-{2}-subnet".format(project_conf['service_base_name'], project_conf['project_name'],
+                                                project_conf['endpoint_name'])}
+    project_conf['private_subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+    subnet_id = dlab.meta_lib.get_subnet_by_cidr(project_conf['private_subnet_cidr'], project_conf['vpc2_id'])
+    print('Subnet id: {}'.format(subnet_id))
     print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
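
common_create_subnet itself is not part of this diff; assuming it scans the VPC for a free block of the requested prefix, the core of such a search can be sketched with the stdlib ipaddress module (the names here are illustrative, not the script's API):

import ipaddress

def pick_free_subnet(vpc_cidr, used_cidrs, prefix=24):
    # Walk candidate /prefix blocks inside the VPC and return the first
    # one that does not overlap an already-allocated subnet.
    used = [ipaddress.ip_network(c) for c in used_cidrs]
    for candidate in ipaddress.ip_network(vpc_cidr).subnets(new_prefix=prefix):
        if not any(candidate.overlaps(u) for u in used):
            return str(candidate)
    raise ValueError('No free /{} subnet left in {}'.format(prefix, vpc_cidr))
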
 
     try:
         logging.info('[CREATE EDGE ROLES]')
         print('[CREATE EDGE ROLES]')
-        user_tag = "{0}:{0}-{1}-edge-Role".format(project_conf['service_base_name'], project_conf['project_name'])
+        user_tag = "{0}:{0}-{1}-{2}-edge-role".format(project_conf['service_base_name'], project_conf['project_name'],
+                                                      project_conf['endpoint_name'])
         params = "--role_name {} --role_profile_name {} --policy_name {} --region {} --infra_tag_name {} " \
                  "--infra_tag_value {} --user_tag_value {}" \
                  .format(project_conf['edge_role_name'], project_conf['edge_role_profile_name'],
@@ -201,14 +228,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to creating roles.", str(err))
+        dlab.fab.append_result("Failed to creating roles.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATE BACKEND (NOTEBOOK) ROLES]')
         print('[CREATE BACKEND (NOTEBOOK) ROLES]')
-        user_tag = "{0}:{0}-{1}-nb-de-Role".format(project_conf['service_base_name'], project_conf['project_name'])
+        user_tag = "{0}:{0}-{1}-{2}-nb-de-role".format(project_conf['service_base_name'], project_conf['project_name'],
+                                                       project_conf['endpoint_name'])
         params = "--role_name {} --role_profile_name {} --policy_name {} --region {} --infra_tag_name {} " \
                  "--infra_tag_value {} --user_tag_value {}" \
                  .format(project_conf['notebook_dataengine_role_name'],
@@ -221,15 +248,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to creating roles.", str(err))
-        remove_all_iam_resources('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed to creating roles.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATE SECURITY GROUP FOR EDGE NODE]')
         print('[CREATE SECURITY GROUPS FOR EDGE]')
-        edge_sg_ingress = format_sg([
+        edge_sg_ingress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": project_conf['private_subnet_cidr']}],
@@ -254,13 +280,19 @@
                 "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
             },
             {
+                "PrefixListIds": [],
+                "FromPort": 443,
+                "IpRanges": project_conf['allowed_ip_cidr'],
+                "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
+            },
+            {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": project_conf['provision_instance_ip']}],
                 "UserIdGroupPairs": [],
                 "PrefixListIds": []
             }
         ])
-        edge_sg_egress = format_sg([
+        edge_sg_egress = dlab.meta_lib.format_sg([
             {
                 "PrefixListIds": [],
                 "FromPort": 22,
@@ -379,26 +411,26 @@
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed creating security group for edge node.", str(err))
+            dlab.fab.append_result("Failed creating security group for edge node.", str(err))
             raise Exception
 
         with hide('stderr', 'running', 'warnings'):
             print('Waiting for changes to propagate')
             time.sleep(10)
     except:
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
         sys.exit(1)
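
The rule dicts built for format_sg above already follow boto3's IpPermissions shape, so (assuming format_sg only de-duplicates and normalizes them) the result could be applied directly; the group id below is a placeholder:

import boto3

ec2 = boto3.client('ec2')
# Attach the assembled ingress rules to an existing security group.
ec2.authorize_security_group_ingress(GroupId='sg-0123456789abcdef0',
                                     IpPermissions=edge_sg_ingress)
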
 
     try:
         logging.info('[CREATE SECURITY GROUP FOR PRIVATE SUBNET]')
         print('[CREATE SECURITY GROUP FOR PRIVATE SUBNET]')
-        project_group_id = check_security_group(project_conf['edge_security_group_name'])
+        project_group_id = dlab.meta_lib.check_security_group(project_conf['edge_security_group_name'])
         sg_list = project_conf['sg_ids'].replace(" ", "").split(',')
         rules_list = []
         for i in sg_list:
             rules_list.append({"GroupId": i})
-        private_sg_ingress = format_sg([
+        private_sg_ingress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [],
@@ -419,7 +451,7 @@
             }
         ])
 
-        private_sg_egress = format_sg([
+        private_sg_egress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": project_conf['private_subnet_cidr']}],
@@ -464,12 +496,11 @@
             print('Waiting for changes to propagate')
             time.sleep(10)
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating security group for private subnet.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed creating security group for private subnet.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
 
     logging.info('[CREATING SECURITY GROUPS FOR MASTER NODE]')
@@ -487,12 +518,11 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create sg.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed to create sg.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
 
     logging.info('[CREATING SECURITY GROUPS FOR SLAVE NODES]')
@@ -510,62 +540,73 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create bucket.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed to create security group.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATE BUCKETS]')
         print('[CREATE BUCKETS]')
-        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {} --bucket_name_tag {}" \
-                 .format(project_conf['bucket_name'], project_conf['tag_name'], project_conf['bucket_name'],
-                         project_conf['region'], project_conf['bucket_name_tag'])
+        project_conf['shared_bucket_tags'] = 'endpoint_tag:{0};{1}:{2};{3}:{4}{5}'.format(
+            project_conf['endpoint_tag'], os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
+            project_conf['tag_name'], project_conf['shared_bucket_name'],
+            project_conf['bucket_additional_tags']).replace(';', ',')
+        params = "--bucket_name {} --bucket_tags {} --region {} --bucket_name_tag {}". \
+            format(project_conf['shared_bucket_name'], project_conf['shared_bucket_tags'], project_conf['region'],
+                   project_conf['shared_bucket_name_tag'])
+        try:
+            local("~/scripts/{}.py {}".format('common_create_bucket', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+        project_conf['bucket_tags'] = 'endpoint_tag:{0};{1}:{2};project_tag:{3};{4}:{5}{6}'.format(
+            project_conf['endpoint_tag'], os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
+            project_conf['project_tag'], project_conf['tag_name'], project_conf['bucket_name'],
+            project_conf['bucket_additional_tags']).replace(';', ',')
+        params = "--bucket_name {} --bucket_tags {} --region {} --bucket_name_tag {}" \
+                 .format(project_conf['bucket_name'], project_conf['bucket_tags'], project_conf['region'],
+                         project_conf['bucket_name_tag'])
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create bucket.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed to create buckets.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
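
common_create_bucket is invoked as an external script, so its internals are not visible here; a hypothetical helper mirroring what it presumably does with the 'key:value,key:value' tag string assembled above:

import boto3

def apply_bucket_tags(bucket_name, tag_string):
    # Split the flattened string back into a boto3 TagSet and apply it.
    tag_set = [{'Key': k, 'Value': v}
               for k, v in (t.split(':', 1) for t in tag_string.split(',') if t)]
    boto3.client('s3').put_bucket_tagging(Bucket=bucket_name,
                                          Tagging={'TagSet': tag_set})
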
 
     try:
         logging.info('[CREATING BUCKET POLICY FOR PROJECT INSTANCES]')
         print('[CREATING BUCKET POLICY FOR USER INSTANCES]')
-        params = '--bucket_name {} --ssn_bucket_name {} --shared_bucket_name {} --username {} --edge_role_name {} ' \
+        params = '--bucket_name {} --shared_bucket_name {} --username {} --edge_role_name {} ' \
                  '--notebook_role_name {} --service_base_name {} --region {} ' \
-                 '--user_predefined_s3_policies "{}"'.format(project_conf['bucket_name'],
-                                                             project_conf['ssn_bucket_name'],
-                                                             project_conf['shared_bucket_name'],
-                                                             os.environ['project_name'], project_conf['edge_role_name'],
-                                                             project_conf['notebook_dataengine_role_name'],
-                                                             project_conf['service_base_name'], project_conf['region'],
-                                                             os.environ['aws_user_predefined_s3_policies'])
+                 '--user_predefined_s3_policies "{}" --endpoint_name {}'.format(
+                  project_conf['bucket_name'], project_conf['shared_bucket_name'], project_conf['project_name'],
+                  project_conf['edge_role_name'], project_conf['notebook_dataengine_role_name'],
+                  project_conf['service_base_name'], project_conf['region'],
+                  os.environ['aws_user_predefined_s3_policies'], project_conf['endpoint_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_policy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create bucket policy.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed to create bucket policy.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+        dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
         sys.exit(1)
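
common_create_policy renders the actual policy documents; purely as an illustration, a minimal statement of the kind it might produce for the project bucket could look like this (the bucket name is a placeholder):

import json

policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Action": ["s3:ListBucket", "s3:GetObject", "s3:PutObject"],
        "Resource": ["arn:aws:s3:::example-project-bucket",
                     "arn:aws:s3:::example-project-bucket/*"]
    }]
}
print(json.dumps(policy, indent=2))
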
 
     try:
@@ -579,27 +620,27 @@
                     project_conf['edge_instance_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
-            edge_instance = get_instance_by_name(project_conf['tag_name'], project_conf['edge_instance_name'])
+            edge_instance = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+                                                               project_conf['edge_instance_name'])
         except:
             traceback.print_exc()
             raise Exception
-
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+        dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
         sys.exit(1)
 
     if project_conf['network_type'] == 'public':
         try:
             logging.info('[ASSOCIATING ELASTIC IP]')
             print('[ASSOCIATING ELASTIC IP]')
-            project_conf['edge_id'] = get_instance_by_name(project_conf['tag_name'], project_conf['edge_instance_name'])
+            project_conf['edge_id'] = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+                                                                         project_conf['edge_instance_name'])
             try:
                 project_conf['elastic_ip'] = os.environ['edge_elastic_ip']
             except:
@@ -613,19 +654,19 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to associate elastic ip.", str(err))
+            dlab.fab.append_result("Failed to associate elastic ip.", str(err))
             try:
-                project_conf['edge_public_ip'] = get_instance_ip_address(project_conf['tag_name'],
-                                                                      project_conf['edge_instance_name']).get('Public')
-                project_conf['allocation_id'] = get_allocation_id_by_elastic_ip(project_conf['edge_public_ip'])
+                project_conf['edge_public_ip'] = dlab.meta_lib.get_instance_ip_address(
+                    project_conf['tag_name'], project_conf['edge_instance_name']).get('Public')
+                project_conf['allocation_id'] = dlab.meta_lib.get_allocation_id_by_elastic_ip(
+                    project_conf['edge_public_ip'])
             except:
                 print("No Elastic IPs to release!")
-            remove_ec2(project_conf['tag_name'], project_conf['edge_instance_name'])
-            remove_all_iam_resources('notebook', os.environ['project_name'])
-            remove_all_iam_resources('edge', os.environ['project_name'])
-            remove_sgroups(project_conf['dataengine_instances_name'])
-            remove_sgroups(project_conf['notebook_instance_name'])
-            remove_sgroups(project_conf['edge_instance_name'])
-            remove_s3('edge', os.environ['project_name'])
-            sys.exit(1)
\ No newline at end of file
+            dlab.actions_lib.remove_ec2(project_conf['tag_name'], project_conf['edge_instance_name'])
+            dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+            dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+            dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+            dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+            dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+            dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
+            sys.exit(1)
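
For reference, the allocate-and-attach flow behind the ELASTIC IP step can be sketched in two boto3 calls (the instance id is a placeholder; in the script this work is delegated to a helper invoked via local()):

import boto3

ec2 = boto3.client('ec2')
# Allocate a new VPC-scoped Elastic IP and bind it to the edge instance.
allocation = ec2.allocate_address(Domain='vpc')
ec2.associate_address(InstanceId='i-0123456789abcdef0',
                      AllocationId=allocation['AllocationId'])
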
diff --git a/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
index 4711b4f..3495b13 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
@@ -22,17 +22,22 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import traceback
+import logging
 import boto3
+import requests
 
 
-def terminate_edge_node(tag_name, project_name, tag_value, nb_sg, edge_sg, de_sg, emr_sg):
+def terminate_edge_node(tag_name, project_name, tag_value, nb_sg, edge_sg, de_sg, emr_sg, endpoint_name):
     print('Terminating EMR cluster')
     try:
-        clusters_list = get_emr_list(tag_name)
+        clusters_list = dlab.meta_lib.get_emr_list(tag_name)
         if clusters_list:
             for cluster_id in clusters_list:
                 client = boto3.client('emr')
@@ -40,89 +45,112 @@
                 cluster = cluster.get("Cluster")
                 emr_name = cluster.get('Name')
                 if '{}'.format(tag_value[:-1]) in emr_name:
-                    terminate_emr(cluster_id)
+                    dlab.actions_lib.terminate_emr(cluster_id)
                     print("The EMR cluster {} has been terminated successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
         sys.exit(1)
 
     print("Terminating EDGE and notebook instances")
     try:
-        remove_ec2(tag_name, tag_value)
-    except:
+        dlab.actions_lib.remove_ec2(tag_name, tag_value)
+    except Exception as err:
+        dlab.fab.append_result("Failed to terminate instances.", str(err))
         sys.exit(1)
 
     print("Removing s3 bucket")
     try:
-        remove_s3('edge', project_name)
-    except:
+        dlab.actions_lib.remove_s3('edge', project_name)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove buckets.", str(err))
         sys.exit(1)
 
     print("Removing IAM roles and profiles")
     try:
-        remove_all_iam_resources('notebook', project_name)
-        remove_all_iam_resources('edge', project_name)
-    except:
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_name, endpoint_name)
+        dlab.actions_lib.remove_all_iam_resources('edge', project_name, endpoint_name)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove IAM roles and profiles.", str(err))
+        sys.exit(1)
+
+    print("Deregistering project specific notebook's AMI")
+    try:
+        dlab.actions_lib.deregister_image(project_name)
+    except Exception as err:
+        dlab.fab.append_result("Failed to deregister images.", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        remove_sgroups(emr_sg)
-        remove_sgroups(de_sg)
-        remove_sgroups(nb_sg)
-        remove_sgroups(edge_sg)
-    except:
+        dlab.actions_lib.remove_sgroups(emr_sg)
+        dlab.actions_lib.remove_sgroups(de_sg)
+        dlab.actions_lib.remove_sgroups(nb_sg)
+        dlab.actions_lib.remove_sgroups(edge_sg)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove Security Groups.", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        remove_subnets(tag_value)
-    except:
+        dlab.actions_lib.remove_subnets(tag_value)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove subnets.", str(err))
         sys.exit(1)
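
dlab.actions_lib.remove_ec2 is not defined in this diff; assuming it terminates every instance whose tag matches the wildcard value, a rough boto3 equivalent would be:

import boto3

def terminate_by_tag(tag_name, tag_value):
    # Find instances by tag (paginated) and terminate them in one call.
    ec2 = boto3.client('ec2')
    pages = ec2.get_paginator('describe_instances').paginate(Filters=[
        {'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]},
        {'Name': 'instance-state-name', 'Values': ['running', 'stopped']}])
    ids = [i['InstanceId'] for page in pages
           for r in page['Reservations'] for i in r['Instances']]
    if ids:
        ec2.terminate_instances(InstanceIds=ids)
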
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/project/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     project_conf = dict()
-    project_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    project_conf['endpoint_name'] = '{}-{}-endpoint'.format(project_conf['service_base_name'], os.environ['endpoint_name'])
+    project_conf['service_base_name'] = os.environ['conf_service_base_name']
     project_conf['project_name'] = os.environ['project_name']
-    project_conf['tag_name'] = project_conf['service_base_name'] + '-Tag'
-    project_conf['tag_value'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-*'
-    project_conf['edge_sg'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-edge'
-    project_conf['nb_sg'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-nb'
+    project_conf['endpoint_name'] = os.environ['endpoint_name']
+    project_conf['endpoint_instance_name'] = '{}-{}-endpoint'.format(project_conf['service_base_name'],
+                                                                     project_conf['endpoint_name'])
+    project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
+    project_conf['tag_value'] = '{}-{}-{}-*'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                                    project_conf['endpoint_name'])
+    project_conf['edge_sg'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                                     project_conf['endpoint_name'])
+    project_conf['nb_sg'] = '{}-{}-{}-nb'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                                 project_conf['endpoint_name'])
     project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
-                                                                os.environ['project_name'], os.environ['endpoint_name'])
-    project_conf['de_sg'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + \
-                                             '-dataengine*'
-    project_conf['emr_sg'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-des-*'
+                                                                project_conf['project_name'],
+                                                                project_conf['endpoint_name'])
+    project_conf['de_sg'] = '{}-{}-{}-de*'.format(project_conf['service_base_name'],
+                                                  project_conf['project_name'],
+                                                  project_conf['endpoint_name'])
+    project_conf['emr_sg'] = '{}-{}-{}-des-*'.format(project_conf['service_base_name'],
+                                                     project_conf['project_name'],
+                                                     project_conf['endpoint_name'])
 
     try:
         logging.info('[TERMINATE PROJECT]')
         print('[TERMINATE PROJECT]')
         try:
             terminate_edge_node(project_conf['tag_name'], project_conf['project_name'], project_conf['tag_value'],
-                                project_conf['nb_sg'], project_conf['edge_sg'], project_conf['de_sg'], project_conf['emr_sg'])
+                                project_conf['nb_sg'], project_conf['edge_sg'], project_conf['de_sg'],
+                                project_conf['emr_sg'], project_conf['endpoint_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate project.", str(err))
+            dlab.fab.append_result("Failed to terminate project.", str(err))
     except Exception as err:
         print('Error: {0}'.format(err))
         sys.exit(1)
 
     try:
-        endpoint_id = get_instance_by_name(project_conf['tag_name'], project_conf['endpoint_name'])
+        endpoint_id = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+                                                         project_conf['endpoint_instance_name'])
         print("Endpoint id: " + endpoint_id)
         ec2 = boto3.client('ec2')
         ec2.delete_tags(Resources=[endpoint_id], Tags=[{'Key': 'project_tag'}, {'Key': 'endpoint_tag'}])
@@ -132,12 +160,51 @@
 #        sys.exit(1)
 
     try:
+        print('[KEYCLOAK PROJECT CLIENT DELETE]')
+        logging.info('[KEYCLOAK PROJECT CLIENT DELETE]')
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            os.environ['keycloak_auth_server_url'])
+        keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(
+            os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'])
+
+        keycloak_auth_data = {
+            "username": os.environ['keycloak_user'],
+            "password": os.environ['keycloak_user_password'],
+            "grant_type": "password",
+            "client_id": "admin-cli",
+        }
+
+        client_params = {
+            "clientId": '{}-{}-{}'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                          project_conf['endpoint_name'])
+        }
+
+        keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
+
+        keycloak_get_id_client = requests.get(keycloak_client_url, params=client_params,
+                                              headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
+                                                       "Content-Type": "application/json"})
+        json_keycloak_client_id = json.loads(keycloak_get_id_client.text)
+        keycloak_id_client = json_keycloak_client_id[0]['id']
+
+        keycloak_client_delete_url = '{0}/admin/realms/{1}/clients/{2}'.format(os.environ['keycloak_auth_server_url'],
+                                                                               os.environ['keycloak_realm_name'],
+                                                                               keycloak_id_client)
+
+        keycloak_client = requests.delete(
+            keycloak_client_delete_url,
+            headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+                     "Content-Type": "application/json"})
+    except Exception as err:
+        print("Failed to remove project client from Keycloak", str(err))
+
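
The delete response above is never inspected; a hedged variant that surfaces failures instead of silently ignoring them (Keycloak answers 204 No Content on a successful client delete):

resp = requests.delete(
    keycloak_client_delete_url,
    headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
             "Content-Type": "application/json"})
# Anything other than 204 means the client was not removed.
if resp.status_code != 204:
    raise Exception('Keycloak client delete failed: {} {}'.format(
        resp.status_code, resp.text))
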
+    try:
         with open("/root/result.json", 'w') as result:
             res = {"service_base_name": project_conf['service_base_name'],
                    "project_name": project_conf['project_name'],
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
index 15f2ba3..dd2a93c 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -43,70 +45,78 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['rstudio_pass'] = id_generator()
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
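
dlab.fab.id_generator is imported from the shared library and not shown in this diff; the assumption is that it is a simple random-string helper along these lines, used above to mint the one-off RStudio password:

import random
import string

def id_generator(size=10, chars=string.ascii_letters + string.digits):
    # Return a random token of the requested size from the given alphabet.
    return ''.join(random.choice(chars) for _ in range(size))
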
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -114,9 +124,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -133,9 +142,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -151,9 +159,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring R_STUDIO and all dependencies
@@ -163,7 +170,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
             .format(instance_hostname, keyfile_name,
                     os.environ['aws_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -175,9 +182,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure rstudio.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure rstudio.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -193,9 +199,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -206,12 +211,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -227,9 +231,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -240,103 +243,110 @@
             'tensor': False
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio', notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    rstudio_dns_url = "http://" + dns_name + ":8787/"
-    rstudio_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio URL: {}".format(rstudio_dns_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        rstudio_dns_url = "http://" + dns_name + ":8787/"
+        rstudio_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio URL: {}".format(rstudio_dns_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "RStudio",
-                    "url": rstudio_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_acces_url}#,
-                   #{"description": "RStudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "RStudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "RStudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
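
Note: the hunks above converge on one per-step error-handling shape: run the step, and on any
failure report it via dlab.fab.append_result(), roll the notebook instance back with
dlab.actions_lib.remove_ec2(), and exit non-zero. A minimal, self-contained sketch of that
shape follows; the helper bodies are hypothetical stand-ins, since the real implementations
live in the dlab package and are not part of this diff.

    import sys
    import traceback

    # Hypothetical stand-ins for dlab.fab.append_result / dlab.actions_lib.remove_ec2;
    # the real helpers persist the failure for the response file and terminate the instance.
    def append_result(message, error=''):
        print('{} {}'.format(message, error))

    def remove_ec2(tag_name, instance_name):
        print('rolling back {} ({})'.format(instance_name, tag_name))

    def guarded_step(description, action, notebook_config):
        # Run one configuration step; on failure: report, roll back, exit non-zero.
        try:
            action()
        except Exception as err:
            traceback.print_exc()
            append_result("Failed to {}.".format(description), str(err))
            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
            sys.exit(1)

    if __name__ == '__main__':
        guarded_step("configure rstudio", lambda: None,
                     {'tag_name': 'demo-Tag', 'instance_name': 'demo-nb'})
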
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
index 5798a02..bb8c555 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
@@ -21,12 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os, json
+import logging
+import sys
+import os
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import traceback
+import json
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -34,84 +38,108 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
+
+    ssn_conf = dict()
+    ssn_conf['instance'] = 'ssn'
+
+    def clear_resources():
+        if ssn_conf['domain_created']:
+            dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'],
+                                                    os.environ['ssn_hosted_zone_name'],
+                                                    os.environ['ssn_subdomain'])
+        dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+        dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+        dlab.actions_lib.remove_s3(ssn_conf['instance'])
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
+            try:
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+            except:
+                print("There are no VPC Endpoints")
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
 
     try:
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-        service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
-        role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
-        policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
-        ssn_bucket_name_tag = service_base_name + '-ssn-bucket'
-        default_endpoint_name = os.environ['default_endpoint_name']
-        shared_bucket_name_tag = '{0}-{1}-shared-bucket'.format(service_base_name, default_endpoint_name)
-        ssn_bucket_name = ssn_bucket_name_tag.lower().replace('_', '-')
-        shared_bucket_name = shared_bucket_name_tag.lower().replace('_', '-')
-        tag_name = service_base_name + '-Tag'
-        tag2_name = service_base_name + '-secondary-Tag'
-        instance_name = service_base_name + '-ssn'
-        region = os.environ['aws_region']
-        ssn_image_name = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
-        ssn_ami_id = get_ami_id(ssn_image_name)
-        policy_path = '/root/files/ssn_policy.json'
-        vpc_cidr = os.environ['conf_vpc_cidr']
-        vpc2_cidr = os.environ['conf_vpc2_cidr']
-        sg_name = instance_name + '-sg'
-        pre_defined_vpc = False
-        pre_defined_subnet = False
-        pre_defined_sg = False
-        billing_enabled = True
-        dlab_ssh_user = os.environ['conf_os_user']
-        network_type = os.environ['conf_network_type']
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and \
                 'ssn_subdomain' in os.environ:
-            domain_created = True
+            ssn_conf['domain_created'] = True
         else:
-            domain_created = False
+            ssn_conf['domain_created'] = False
+        ssn_conf['pre_defined_vpc'] = False
+        ssn_conf['pre_defined_subnet'] = False
+        ssn_conf['pre_defined_sg'] = False
+        ssn_conf['billing_enabled'] = True
+        ssn_conf['role_name'] = '{}-ssn-role'.format(ssn_conf['service_base_name'])
+        ssn_conf['role_profile_name'] = '{}-ssn-profile'.format(ssn_conf['service_base_name'])
+        ssn_conf['policy_name'] = '{}-ssn-policy'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag_name'] = '{}-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag2_name'] = '{}-secondary-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['user_tag'] = "{0}:{0}-ssn-role".format(ssn_conf['service_base_name'])
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['region'] = os.environ['aws_region']
+        ssn_conf['ssn_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+        ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+        ssn_conf['sg_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+        ssn_conf['network_type'] = os.environ['conf_network_type']
+        ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
 
         try:
             if os.environ['aws_vpc_id'] == '':
                 raise KeyError
         except KeyError:
-            tag = {"Key": tag_name, "Value": "{}-subnet".format(service_base_name)}
-            os.environ['aws_vpc_id'] = get_vpc_by_tag(tag_name, service_base_name)
-            pre_defined_vpc = True
+            ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+            os.environ['aws_vpc_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'], ssn_conf['service_base_name'])
+            ssn_conf['pre_defined_vpc'] = True
         try:
             if os.environ['aws_subnet_id'] == '':
                 raise KeyError
         except KeyError:
-            tag = {"Key": tag_name, "Value": "{}-subnet".format(service_base_name)}
-            os.environ['aws_subnet_id'] = get_subnet_by_tag(tag, True)
-            pre_defined_subnet = True
+            ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+            os.environ['aws_subnet_id'] = dlab.meta_lib.get_subnet_by_tag(ssn_conf['tag'], True)
+            ssn_conf['pre_defined_subnet'] = True
         try:
             if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
                 raise KeyError
         except KeyError:
-            tag = {"Key": tag2_name, "Value": "{}-subnet".format(service_base_name)}
-            os.environ['aws_vpc2_id'] = get_vpc_by_tag(tag2_name, service_base_name)
-            pre_defined_vpc2 = True
+            ssn_conf['tag'] = {"Key": ssn_conf['tag2_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+            os.environ['aws_vpc2_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
+                                                                     ssn_conf['service_base_name'])
+            ssn_conf['pre_defined_vpc2'] = True
         try:
             if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_peering_id']:
                 raise KeyError
         except KeyError:
-            os.environ['aws_peering_id'] = get_peering_by_tag(tag_name, service_base_name)
-            pre_defined_peering = True
+            os.environ['aws_peering_id'] = dlab.meta_lib.get_peering_by_tag(ssn_conf['tag_name'],
+                                                                            ssn_conf['service_base_name'])
+            ssn_conf['pre_defined_peering'] = True
         try:
             if os.environ['aws_security_groups_ids'] == '':
                 raise KeyError
         except KeyError:
-            os.environ['aws_security_groups_ids'] = get_security_group_by_name(sg_name)
-            pre_defined_sg = True
+            os.environ['aws_security_groups_ids'] = dlab.meta_lib.get_security_group_by_name(ssn_conf['sg_name'])
+            ssn_conf['pre_defined_sg'] = True
         try:
             if os.environ['aws_account_id'] == '':
                 raise KeyError
             if os.environ['aws_billing_bucket'] == '':
                 raise KeyError
         except KeyError:
-            billing_enabled = False
-        if not billing_enabled:
+            ssn_conf['billing_enabled'] = False
+        if not ssn_conf['billing_enabled']:
             os.environ['aws_account_id'] = 'None'
             os.environ['aws_billing_bucket'] = 'None'
         try:
@@ -120,27 +148,41 @@
         except KeyError:
             os.environ['aws_report_path'] = ''
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            ssn_conf['initial_user'] = 'ubuntu'
+            ssn_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            ssn_conf['initial_user'] = 'ec2-user'
+            ssn_conf['sudo_group'] = 'wheel'
 
-        if network_type == 'private':
-            instance_hostname = get_instance_ip_address(tag_name, instance_name).get('Private')
+        if ssn_conf['network_type'] == 'private':
+            ssn_conf['instance_hostname'] = dlab.meta_lib.get_instance_ip_address(
+                ssn_conf['tag_name'], ssn_conf['instance_name']).get('Private')
         else:
-            instance_hostname = get_instance_hostname(tag_name, instance_name)
+            ssn_conf['instance_hostname'] = dlab.meta_lib.get_instance_hostname(
+                ssn_conf['tag_name'], ssn_conf['instance_name'])
+
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            ssn_conf['step_cert_sans'] = ' --san {0} '.format(dlab.meta_lib.get_instance_ip_address(
+                ssn_conf['tag_name'], ssn_conf['instance_name']).get('Private'))
+            if ssn_conf['network_type'] == 'public':
+                ssn_conf['step_cert_sans'] += ' --san {0} --san {1}'.format(
+                    dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'], ssn_conf['instance_name']),
+                    dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+                                                          ssn_conf['instance_name']).get('Public'))
+        else:
+            ssn_conf['step_cert_sans'] = ''
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             dlab_ssh_user, sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            ssn_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            ssn_conf['initial_user'], ssn_conf['dlab_ssh_user'], ssn_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -148,30 +190,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -179,8 +199,8 @@
         print('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
         params = "--hostname {} --keyfile {} --pip_packages 'boto3 backoff argparse fabric==1.14.0 awscli pymongo " \
                  "pyyaml jinja2' --user {} --region {}". \
-            format(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", dlab_ssh_user,
-                   os.environ['aws_region'])
+            format(ssn_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+                   ssn_conf['dlab_ssh_user'], os.environ['aws_region'])
 
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
@@ -188,44 +208,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed installing software: pip, packages.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE SSN INSTANCE]')
         print('[CONFIGURE SSN INSTANCE]')
-        additional_config = {"nginx_template_dir": "/root/templates/", "service_base_name": service_base_name,
+        additional_config = {"nginx_template_dir": "/root/templates/", "service_base_name":
+                             ssn_conf['service_base_name'],
                              "security_group_id": os.environ['aws_security_groups_ids'],
                              "vpc_id": os.environ['aws_vpc_id'], "subnet_id": os.environ['aws_subnet_id'],
                              "admin_key": os.environ['conf_key_name']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
-                 "--tag_resource_id {}".format(instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'],
-                                                                                    os.environ['conf_key_name']),
-                                               json.dumps(additional_config), dlab_ssh_user,
-                                               os.environ['ssn_dlab_path'], os.environ['conf_tag_resource_id'])
+                 "--tag_resource_id {} --step_cert_sans '{}' ".format(
+                  ssn_conf['instance_hostname'],
+                  "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+                  json.dumps(additional_config), ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
+                  os.environ['conf_tag_resource_id'], ssn_conf['step_cert_sans'])
 
         try:
             local("~/scripts/{}.py {}".format('configure_ssn_node', params))
@@ -233,30 +233,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed configuring ssn.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Failed configuring ssn.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -270,15 +248,16 @@
                              {"name": "zeppelin", "tag": "latest"},
                              {"name": "tensor", "tag": "latest"},
                              {"name": "tensor-rstudio", "tag": "latest"},
+                             {"name": "jupyterlab", "tag": "latest"},
                              {"name": "deeplearning", "tag": "latest"},
                              {"name": "dataengine-service", "tag": "latest"},
                              {"name": "dataengine", "tag": "latest"}]
         params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
-                 "--cloud_provider {} --region {}".format(instance_hostname,
+                 "--cloud_provider {} --region {}".format(ssn_conf['instance_hostname'],
                                                           "{}{}.pem".format(os.environ['conf_key_dir'],
                                                                             os.environ['conf_key_name']),
                                                           json.dumps(additional_config), os.environ['conf_os_family'],
-                                                          dlab_ssh_user, os.environ['ssn_dlab_path'],
+                                                          ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
                                                           os.environ['conf_cloud_provider'], os.environ['aws_region'])
 
         try:
@@ -287,61 +266,16 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Unable to configure docker.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Unable to configure docker.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        # mongo_parameters = {
-        #     "aws_region": os.environ['aws_region'],
-        #     "aws_vpc_id": os.environ['aws_vpc_id'],
-        #     "aws_subnet_id": os.environ['aws_subnet_id'],
-        #     "conf_service_base_name": service_base_name,
-        #     "aws_security_groups_ids": os.environ['aws_security_groups_ids'].replace(" ", ""),
-        #     "conf_os_family": os.environ['conf_os_family'],
-        #     "conf_tag_resource_id": os.environ['conf_tag_resource_id'],
-        #     "conf_key_dir": os.environ['conf_key_dir'],
-        #     "ssn_instance_size": os.environ['aws_ssn_instance_size'],
-        #     "edge_instance_size": os.environ['aws_edge_instance_size']
-        # }
-        # if os.environ['conf_duo_vpc_enable'] == 'true':
-        #     secondary_parameters = {
-        #         "aws_notebook_vpc_id": os.environ['aws_vpc2_id'],
-        #         "aws_notebook_subnet_id": os.environ['aws_subnet_id'],
-        #         "aws_peering_id": os.environ['aws_peering_id']
-        #     }
-        # else:
-        #     secondary_parameters = {
-        #         "aws_notebook_vpc_id": os.environ['aws_vpc_id'],
-        #         "aws_notebook_subnet_id": os.environ['aws_subnet_id'],
-        #     }
-        # mongo_parameters.update(secondary_parameters)
         cloud_params = [
             {
                 'key': 'KEYCLOAK_REDIRECT_URI',
-                'value': "http://{0}/".format(get_instance_hostname(tag_name, instance_name))
+                'value': "https://{0}/".format(dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'],
+                                                                                   ssn_conf['instance_name']))
             },
             {
                 'key': 'KEYCLOAK_REALM_NAME',
@@ -360,6 +294,14 @@
                 'value': os.environ['keycloak_client_secret']
             },
             {
+                'key': 'KEYCLOAK_USER_NAME',
+                'value': os.environ['keycloak_user']
+            },
+            {
+                'key': 'KEYCLOAK_PASSWORD',
+                'value': os.environ['keycloak_user_password']
+            },
+            {
                 'key': 'CONF_OS',
                 'value': os.environ['conf_os_family']
             },
@@ -452,8 +394,8 @@
                 'value': os.environ['conf_image_enabled']
             },
             {
-                'key': 'SHARED_IMAGE_ENABLED',
-                'value': os.environ['conf_shared_image_enabled']
+                'key': "AZURE_AUTH_FILE_PATH",
+                'value': ""
             }
         ]
         if os.environ['conf_duo_vpc_enable'] == 'true':
@@ -488,6 +430,58 @@
                     'key': 'PEERING_ID',
                     'value': ''
                 })
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            cloud_params.append(
+                {
+                    'key': 'STEP_CERTS_ENABLED',
+                    'value': os.environ['conf_stepcerts_enabled']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_ROOT_CA',
+                    'value': os.environ['conf_stepcerts_root_ca']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_ID',
+                    'value': os.environ['conf_stepcerts_kid']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_PASSWORD',
+                    'value': os.environ['conf_stepcerts_kid_password']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_CA_URL',
+                    'value': os.environ['conf_stepcerts_ca_url']
+                })
+        else:
+            cloud_params.append(
+                {
+                    'key': 'STEP_CERTS_ENABLED',
+                    'value': 'false'
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_ROOT_CA',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_ID',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_PASSWORD',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_CA_URL',
+                    'value': ''
+                })
         logging.info('[CONFIGURE SSN INSTANCE UI]')
         print('[CONFIGURE SSN INSTANCE UI]')
         params = "--hostname {} " \
@@ -515,15 +509,18 @@
                  "--cost {} " \
                  "--resource_id {} " \
                  "--default_endpoint_name {} " \
-                 "--tags {}". \
-            format(instance_hostname,
+                 "--tags {} " \
+                 "--keycloak_client_id {} " \
+                 "--keycloak_client_secret {} " \
+                 "--keycloak_auth_server_url {}". \
+            format(ssn_conf['instance_hostname'],
                    "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
                    os.environ['ssn_dlab_path'],
-                   dlab_ssh_user,
+                   ssn_conf['dlab_ssh_user'],
                    os.environ['conf_os_family'],
                    os.environ['request_id'],
                    os.environ['conf_resource'],
-                   service_base_name,
+                   ssn_conf['service_base_name'],
                    os.environ['conf_tag_resource_id'],
                    os.environ['conf_billing_tag'],
                    os.environ['conf_cloud_provider'],
@@ -531,7 +528,7 @@
                    os.environ['aws_billing_bucket'],
                    os.environ['aws_job_enabled'],
                    os.environ['aws_report_path'],
-                   billing_enabled,
+                   ssn_conf['billing_enabled'],
                    json.dumps(cloud_params),
                    os.environ['dlab_id'],
                    os.environ['usage_date'],
@@ -541,64 +538,46 @@
                    os.environ['cost'],
                    os.environ['resource_id'],
                    os.environ['default_endpoint_name'],
-                   os.environ['tags'])
+                   os.environ['tags'],
+                   os.environ['keycloak_client_name'],
+                   os.environ['keycloak_client_secret'],
+                   os.environ['keycloak_auth_server_url'])
         try:
             local("~/scripts/{}.py {}".format('configure_ui', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Unable to configure UI.", str(err))
-        print(err)
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Unable to configure UI.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
-        print("Service base name: {}".format(service_base_name))
-        print("SSN Name: {}".format(instance_name))
-        print("SSN Hostname: {}".format(instance_hostname))
-        print("Role name: {}".format(role_name))
-        print("Role profile name: {}".format(role_profile_name))
-        print("Policy name: {}".format(policy_name))
+        print("Service base name: {}".format(ssn_conf['service_base_name']))
+        print("SSN Name: {}".format(ssn_conf['instance_name']))
+        print("SSN Hostname: {}".format(ssn_conf['instance_hostname']))
+        print("Role name: {}".format(ssn_conf['role_name']))
+        print("Role profile name: {}".format(ssn_conf['role_profile_name']))
+        print("Policy name: {}".format(ssn_conf['policy_name']))
         print("Key name: {}".format(os.environ['conf_key_name']))
         print("VPC ID: {}".format(os.environ['aws_vpc_id']))
         print("Subnet ID: {}".format(os.environ['aws_subnet_id']))
         print("Security IDs: {}".format(os.environ['aws_security_groups_ids']))
         print("SSN instance shape: {}".format(os.environ['aws_ssn_instance_size']))
-        print("SSN AMI name: {}".format(ssn_image_name))
-        print("SSN bucket name: {}".format(ssn_bucket_name))
-        print("Shared bucket name: {}".format(shared_bucket_name))
-        print("Region: {}".format(region))
-        jenkins_url = "http://{}/jenkins".format(get_instance_hostname(tag_name, instance_name))
-        jenkins_url_https = "https://{}/jenkins".format(get_instance_hostname(tag_name, instance_name))
-        print("Jenkins URL: {}".format(jenkins_url))
-        print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
-        print("DLab UI HTTP URL: http://{}".format(get_instance_hostname(tag_name, instance_name)))
-        print("DLab UI HTTPS URL: https://{}".format(get_instance_hostname(tag_name, instance_name)))
+        print("SSN AMI name: {}".format(ssn_conf['ssn_image_name']))
+        print("Region: {}".format(ssn_conf['region']))
+        ssn_conf['jenkins_url'] = "http://{}/jenkins".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name']))
+        ssn_conf['jenkins_url_https'] = "https://{}/jenkins".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name']))
+        print("Jenkins URL: {}".format(ssn_conf['jenkins_url']))
+        print("Jenkins URL HTTPS: {}".format(ssn_conf['jenkins_url_https']))
+        print("DLab UI HTTP URL: http://{}".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name'])))
+        print("DLab UI HTTPS URL: https://{}".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name'])))
         try:
             with open('jenkins_creds.txt') as f:
                 print(f.read())
@@ -606,26 +585,26 @@
             print("Jenkins is either configured already or have issues in configuration routine.")
 
         with open("/root/result.json", 'w') as f:
-            res = {"service_base_name": service_base_name,
-                   "instance_name": instance_name,
-                   "instance_hostname": get_instance_hostname(tag_name, instance_name),
-                   "role_name": role_name,
-                   "role_profile_name": role_profile_name,
-                   "policy_name": policy_name,
+            res = {"service_base_name": ssn_conf['service_base_name'],
+                   "instance_name": ssn_conf['instance_name'],
+                   "instance_hostname": dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'],
+                                                                            ssn_conf['instance_name']),
+                   "role_name": ssn_conf['role_name'],
+                   "role_profile_name": ssn_conf['role_profile_name'],
+                   "policy_name": ssn_conf['policy_name'],
                    "master_keyname": os.environ['conf_key_name'],
                    "vpc_id": os.environ['aws_vpc_id'],
                    "subnet_id": os.environ['aws_subnet_id'],
                    "security_id": os.environ['aws_security_groups_ids'],
                    "instance_shape": os.environ['aws_ssn_instance_size'],
-                   "bucket_name": ssn_bucket_name,
-                   "shared_bucket_name": shared_bucket_name,
-                   "region": region,
+                   "region": ssn_conf['region'],
                    "action": "Create SSN instance"}
             f.write(json.dumps(res))
 
         print('Upload response file')
         params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
-            format(instance_name, local_log_filepath, dlab_ssh_user, instance_hostname)
+            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'],
+                   ssn_conf['instance_hostname'])
         local("~/scripts/{}.py {}".format('upload_response_file', params))
 
         logging.info('[FINALIZE]')
@@ -634,28 +613,7 @@
         if os.environ['conf_lifecycle_stage'] == 'prod':
             params += "--key_id {}".format(os.environ['aws_access_key'])
             local("~/scripts/{}.py {}".format('ssn_finalize', params))
-    except:
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
         sys.exit(1)
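
Note: the bulk of the ssn_configure.py change above is mechanical: every except block used to
repeat the same rollback sequence (Route 53 record, EC2 instance, IAM resources, S3, security
groups, subnets, VPCs), and each repetition is now a single call to the clear_resources()
function defined once near the top. A condensed sketch of that consolidation, with simplified
stand-in flags and print statements in place of the dlab.actions_lib removal calls:

    import sys

    # Stand-in for the flags ssn_configure.py records while deriving names;
    # each flag marks a resource that this run created and therefore owns.
    ssn_conf = {
        'pre_defined_sg': False,
        'pre_defined_subnet': True,
        'pre_defined_vpc': True,
        'tag_name': 'demo-tag',
        'subnet_name': 'demo-subnet',
    }

    def clear_resources():
        # Remove only what this run created, most-dependent resources first.
        if ssn_conf['pre_defined_sg']:
            print('removing security groups tagged {}'.format(ssn_conf['tag_name']))
        if ssn_conf['pre_defined_subnet']:
            print('removing subnet {}'.format(ssn_conf['subnet_name']))
        if ssn_conf['pre_defined_vpc']:
            print('removing VPC and its route tables')

    try:
        raise RuntimeError('simulated provisioning failure')
    except Exception as err:
        print('Failed configuring ssn: {}'.format(err))
        clear_resources()
        sys.exit(1)
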
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
index 9acf264..45c65f2 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
@@ -21,337 +21,349 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import logging
+import sys
+import os
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
+import json
 
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
-    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
-    pre_defined_vpc = False
-    pre_defined_subnet = False
-    pre_defined_sg = False
-    pre_defined_vpc2 = False
+    ssn_conf = dict()
+    ssn_conf['instance'] = 'ssn'
+    ssn_conf['pre_defined_vpc'] = False
+    ssn_conf['pre_defined_subnet'] = False
+    ssn_conf['pre_defined_sg'] = False
+    ssn_conf['pre_defined_vpc2'] = False
     try:
         logging.info('[CREATE AWS CONFIG FILE]')
         print('[CREATE AWS CONFIG FILE]')
         if 'aws_access_key' in os.environ and 'aws_secret_access_key' in os.environ:
-            create_aws_config_files(generate_full_config=True)
+            dlab.actions_lib.create_aws_config_files(generate_full_config=True)
         else:
-            create_aws_config_files()
+            dlab.actions_lib.create_aws_config_files()
     except Exception as err:
-        print('Error: {0}'.format(err))
         logging.info('Unable to create configuration')
-        append_result("Unable to create configuration")
+        dlab.fab.append_result("Unable to create configuration", err)
         traceback.print_exc()
         sys.exit(1)
 
     try:
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-        service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
-        role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
-        policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
-        ssn_bucket_name_tag = service_base_name + '-ssn-bucket'
-        default_endpoint_name = os.environ['default_endpoint_name']
-        shared_bucket_name_tag = '{0}-{1}-shared-bucket'.format(service_base_name, default_endpoint_name)
-        ssn_bucket_name = ssn_bucket_name_tag.lower().replace('_', '-')
-        shared_bucket_name = shared_bucket_name_tag.lower().replace('_', '-')
-        tag_name = service_base_name + '-Tag'
-        tag2_name = service_base_name + '-secondary-Tag'
-        user_tag = "{0}:{0}-ssn-Role".format(service_base_name)
-        instance_name = service_base_name + '-ssn'
-        region = os.environ['aws_region']
-        zone_full = os.environ['aws_region'] + os.environ['aws_zone']
-        ssn_image_name = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
-        ssn_ami_id = get_ami_id(ssn_image_name)
-        policy_path = '/root/files/ssn_policy.json'
-        vpc_cidr = os.environ['conf_vpc_cidr']
-        vpc2_cidr = os.environ['conf_vpc2_cidr']
-        vpc_name = '{}-VPC'.format(service_base_name)
-        vpc2_name = '{}-secondary-VPC'.format(service_base_name)
-        subnet_name = '{}-subnet'.format(service_base_name)
-        allowed_ip_cidr = list()
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        ssn_conf['role_name'] = '{}-ssn-role'.format(ssn_conf['service_base_name'])
+        ssn_conf['role_profile_name'] = '{}-ssn-profile'.format(ssn_conf['service_base_name'])
+        ssn_conf['policy_name'] = '{}-ssn-policy'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag_name'] = '{}-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag2_name'] = '{}-secondary-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['user_tag'] = "{0}:{0}-ssn-role".format(ssn_conf['service_base_name'])
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['region'] = os.environ['aws_region']
+        ssn_conf['zone_full'] = os.environ['aws_region'] + os.environ['aws_zone']
+        ssn_conf['ssn_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+        ssn_conf['ssn_ami_id'] = dlab.meta_lib.get_ami_id(ssn_conf['ssn_image_name'])
+        ssn_conf['policy_path'] = '/root/files/ssn_policy.json'
+        ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        ssn_conf['vpc2_cidr'] = os.environ['conf_vpc2_cidr']
+        ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
+        ssn_conf['vpc2_name'] = '{}-vpc2'.format(ssn_conf['service_base_name'])
+        ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+        ssn_conf['allowed_ip_cidr'] = list()
         for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-            allowed_ip_cidr.append({"CidrIp": cidr.replace(' ','')})
-        sg_name = instance_name + '-sg'
-        network_type = os.environ['conf_network_type']
-        all_ip_cidr = '0.0.0.0/0'
-        elastic_ip_name = '{0}-ssn-EIP'.format(service_base_name)
+            ssn_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ', '')})
+        ssn_conf['sg_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+        ssn_conf['network_type'] = os.environ['conf_network_type']
+        ssn_conf['all_ip_cidr'] = '0.0.0.0/0'
+        ssn_conf['elastic_ip_name'] = '{0}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
 
-        if get_instance_by_name(tag_name, instance_name):
-            print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+    if dlab.meta_lib.get_instance_by_name(ssn_conf['tag_name'], ssn_conf['instance_name']):
+        print("Service base name should be unique and less or equal 20 symbols. Please try again.")
+        sys.exit(1)
+
+    try:
+        if not os.environ['aws_vpc_id']:
+            raise KeyError
+    except KeyError:
+        try:
+            ssn_conf['pre_defined_vpc'] = True
+            logging.info('[CREATE VPC AND ROUTE TABLE]')
+            print('[CREATE VPC AND ROUTE TABLE]')
+            params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --vpc_name {}".format(
+                ssn_conf['vpc_cidr'], ssn_conf['region'], ssn_conf['tag_name'], ssn_conf['service_base_name'],
+                ssn_conf['vpc_name'])
+            try:
+                local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            os.environ['aws_vpc_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'],
+                                                                    ssn_conf['service_base_name'])
+        except Exception as err:
+            dlab.fab.append_result("Failed to create VPC", str(err))
             sys.exit(1)
 
+    ssn_conf['allowed_vpc_cidr_ip_ranges'] = list()
+    for cidr in dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_vpc_id']):
+        ssn_conf['allowed_vpc_cidr_ip_ranges'].append({"CidrIp": cidr})
+
+    try:
+        if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
+            raise KeyError
+    except KeyError:
         try:
-            if not os.environ['aws_vpc_id']:
-                raise KeyError
-        except KeyError:
+            ssn_conf['pre_defined_vpc2'] = True
+            logging.info('[CREATE SECONDARY VPC AND ROUTE TABLE]')
+            print('[CREATE SECONDARY VPC AND ROUTE TABLE]')
+            params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary " \
+                     "--vpc_name {}".format(ssn_conf['vpc2_cidr'], ssn_conf['region'], ssn_conf['tag2_name'],
+                                            ssn_conf['service_base_name'], ssn_conf['vpc2_name'])
             try:
-                pre_defined_vpc = True
-                logging.info('[CREATE VPC AND ROUTE TABLE]')
-                print('[CREATE VPC AND ROUTE TABLE]')
-                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --vpc_name {}".format(
-                    vpc_cidr, region, tag_name, service_base_name, vpc_name)
+                local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            os.environ['aws_vpc2_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
+                                                                     ssn_conf['service_base_name'])
+        except Exception as err:
+            dlab.fab.append_result("Failed to create secondary VPC.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            sys.exit(1)
+
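+    # Create the SSN subnet (/20 prefix) in the target VPC, read its ID back
+    # from the helper script, and enable auto-assignment of public IPs.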
+    try:
+        if os.environ['aws_subnet_id'] == '':
+            raise KeyError
+    except KeyError:
+        try:
+            ssn_conf['pre_defined_subnet'] = True
+            logging.info('[CREATE SUBNET]')
+            print('[CREATE SUBNET]')
+            params = "--vpc_id {0} --username {1} --infra_tag_name {2} --infra_tag_value {3} --prefix {4} " \
+                     "--ssn {5} --zone {6} --subnet_name {7}".format(
+                      os.environ['aws_vpc_id'], 'ssn', ssn_conf['tag_name'], ssn_conf['service_base_name'], '20',
+                      True, ssn_conf['zone_full'], ssn_conf['subnet_name'])
+            try:
+                local("~/scripts/{}.py {}".format('common_create_subnet', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            with open('/tmp/ssn_subnet_id', 'r') as f:
+                os.environ['aws_subnet_id'] = f.read()
+            dlab.actions_lib.enable_auto_assign_ip(os.environ['aws_subnet_id'])
+        except Exception as err:
+            dlab.fab.append_result("Failed to create Subnet.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
                 try:
-                    local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+                    dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                os.environ['aws_vpc_id'] = get_vpc_by_tag(tag_name, service_base_name)
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create VPC. Exception:" + str(err))
-                sys.exit(1)
-
-        allowed_vpc_cidr_ip_ranges = list()
-        for cidr in get_vpc_cidr_by_id(os.environ['aws_vpc_id']):
-            allowed_vpc_cidr_ip_ranges.append({"CidrIp": cidr})
-
-        try:
-            if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
-                raise KeyError
-        except KeyError:
-            try:
-                pre_defined_vpc2 = True
-                logging.info('[CREATE SECONDARY VPC AND ROUTE TABLE]')
-                print('[CREATE SECONDARY VPC AND ROUTE TABLE]')
-                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary " \
-                         "--vpc_name {}".format(vpc2_cidr, region, tag2_name, service_base_name, vpc2_name)
+                    print("Subnet hasn't been created.")
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
                 try:
-                    local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                os.environ['aws_vpc2_id'] = get_vpc_by_tag(tag2_name, service_base_name)
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create secondary VPC. Exception:" + str(err))
-                if pre_defined_vpc:
-                    remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                    remove_route_tables(tag_name, True)
-                    remove_vpc(os.environ['aws_vpc_id'])
-                sys.exit(1)
+                    print("There are no VPC Endpoints")
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+            sys.exit(1)
 
+    try:
+        if os.environ['conf_duo_vpc_enable'] == 'true' and os.environ['aws_vpc_id'] and os.environ['aws_vpc2_id']:
+            raise KeyError
+    except KeyError:
         try:
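+            # Peer the two VPCs and route the SSN subnet to the secondary VPC
+            # CIDR through the new peering connection.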
-            if os.environ['aws_subnet_id'] == '':
-                raise KeyError
-        except KeyError:
-            try:
-                pre_defined_subnet = True
-                logging.info('[CREATE SUBNET]')
-                print('[CREATE SUBNET]')
-                params = "--vpc_id {0} --username {1} --infra_tag_name {2} --infra_tag_value {3} --prefix {4} " \
-                         "--ssn {5} --zone {6} --subnet_name {7}".format(os.environ['aws_vpc_id'], 'ssn', tag_name,
-                                                             service_base_name, '20', True, zone_full, subnet_name)
+            logging.info('[CREATE PEERING CONNECTION]')
+            print('[CREATE PEERING CONNECTION]')
+            os.environ['aws_peering_id'] = dlab.actions_lib.create_peering_connection(
+                os.environ['aws_vpc_id'], os.environ['aws_vpc2_id'], ssn_conf['service_base_name'])
+            print('PEERING CONNECTION ID: ' + os.environ['aws_peering_id'])
+            dlab.actions_lib.create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'],
+                                                os.environ['aws_peering_id'],
+                                                dlab.meta_lib.get_cidr_by_vpc(os.environ['aws_vpc2_id']))
+        except Exception as err:
+            dlab.fab.append_result("Failed to create peering connection.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
                 try:
-                    local("~/scripts/{}.py {}".format('common_create_subnet', params))
+                    dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                with open('/tmp/ssn_subnet_id', 'r') as f:
-                    os.environ['aws_subnet_id'] = f.read()
-                enable_auto_assign_ip(os.environ['aws_subnet_id'])
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create Subnet.", str(err))
-                if pre_defined_vpc:
-                    remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                    remove_route_tables(tag_name, True)
-                    try:
-                        remove_subnets(service_base_name + "-subnet")
-                    except:
-                        print("Subnet hasn't been created.")
-                    remove_vpc(os.environ['aws_vpc_id'])
-                if pre_defined_vpc2:
-                    try:
-                        remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-                    except:
-                        print("There are no VPC Endpoints")
-                    remove_route_tables(tag2_name, True)
-                    remove_vpc(os.environ['aws_vpc2_id'])
-                sys.exit(1)
-
-        try:
-            if os.environ['conf_duo_vpc_enable'] == 'true' and os.environ['aws_vpc_id'] and os.environ['aws_vpc2_id']:
-                raise KeyError
-        except KeyError:
-            try:
-                logging.info('[CREATE PEERING CONNECTION]')
-                print('[CREATE PEERING CONNECTION]')
-                os.environ['aws_peering_id'] = create_peering_connection(os.environ['aws_vpc_id'],
-                                                                         os.environ['aws_vpc2_id'], service_base_name)
-                print('PEERING CONNECTION ID:' + os.environ['aws_peering_id'])
-                create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'], os.environ['aws_peering_id'],
-                                   get_cidr_by_vpc(os.environ['aws_vpc2_id']))
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create peering connection.", str(err))
-                if pre_defined_vpc:
-                    remove_route_tables(tag_name, True)
-                    try:
-                        remove_subnets(service_base_name + "-subnet")
-                    except:
-                        print("Subnet hasn't been created.")
-                    remove_vpc(os.environ['aws_vpc_id'])
-                if pre_defined_vpc2:
-                    remove_peering('*')
-                    try:
-                        remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-                    except:
-                        print("There are no VPC Endpoints")
-                    remove_route_tables(tag2_name, True)
-                    remove_vpc(os.environ['aws_vpc2_id'])
-                sys.exit(1)
-
-        try:
-            if os.environ['aws_security_groups_ids'] == '':
-                raise KeyError
-        except KeyError:
-            try:
-                pre_defined_sg = True
-                logging.info('[CREATE SG FOR SSN]')
-                print('[CREATE SG FOR SSN]')
-                ingress_sg_rules_template = format_sg([
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 80,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 22,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": 22, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 443,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": -1,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": -1, "IpProtocol": "icmp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 80,
-                        "IpRanges": allowed_vpc_cidr_ip_ranges,
-                        "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 443,
-                        "IpRanges": allowed_vpc_cidr_ip_ranges,
-                        "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    }
-                ])
-                egress_sg_rules_template = format_sg([
-                    {"IpProtocol": "-1", "IpRanges": [{"CidrIp": all_ip_cidr}], "UserIdGroupPairs": [], "PrefixListIds": []}
-                ])
-                params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
-                         "--infra_tag_value {} --force {} --ssn {}". \
-                    format(sg_name, os.environ['aws_vpc_id'], json.dumps(ingress_sg_rules_template),
-                           json.dumps(egress_sg_rules_template), service_base_name, tag_name, False, True)
+                    print("Subnet hasn't been created.")
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    local("~/scripts/{}.py {}".format('common_create_security_group', params))
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                with open('/tmp/ssn_sg_id', 'r') as f:
-                    os.environ['aws_security_groups_ids'] = f.read()
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed creating security group for SSN.", str(err))
-                if pre_defined_vpc:
-                    remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                    remove_subnets(service_base_name + "-subnet")
-                    remove_route_tables(tag_name, True)
-                    remove_vpc(os.environ['aws_vpc_id'])
-                if pre_defined_vpc2:
-                    remove_peering('*')
-                    try:
-                        remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-                    except:
-                        print("There are no VPC Endpoints")
-                    remove_route_tables(tag2_name, True)
-                    remove_vpc(os.environ['aws_vpc2_id'])
-                sys.exit(1)
+                    print("There are no VPC Endpoints")
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+            sys.exit(1)
+
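+    # Build the SSN security group: SSH, HTTP, HTTPS and ICMP from the allowed
+    # CIDRs plus HTTP/HTTPS from the VPC CIDR ranges; egress is fully open.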
+    try:
+        if os.environ['aws_security_groups_ids'] == '':
+            raise KeyError
+    except KeyError:
+        try:
+            ssn_conf['pre_defined_sg'] = True
+            logging.info('[CREATE SG FOR SSN]')
+            print('[CREATE SG FOR SSN]')
+            ssn_conf['ingress_sg_rules_template'] = dlab.meta_lib.format_sg([
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 80,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 22,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": 22, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 443,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": -1,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": -1, "IpProtocol": "icmp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 80,
+                    "IpRanges": ssn_conf['allowed_vpc_cidr_ip_ranges'],
+                    "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 443,
+                    "IpRanges": ssn_conf['allowed_vpc_cidr_ip_ranges'],
+                    "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                }
+            ])
+            egress_sg_rules_template = dlab.meta_lib.format_sg([
+                {"IpProtocol": "-1", "IpRanges": [{"CidrIp": ssn_conf['all_ip_cidr']}], "UserIdGroupPairs": [],
+                 "PrefixListIds": []}
+            ])
+            params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
+                     "--infra_tag_value {} --force {} --ssn {}". \
+                format(ssn_conf['sg_name'], os.environ['aws_vpc_id'],
+                       json.dumps(ssn_conf['ingress_sg_rules_template']), json.dumps(egress_sg_rules_template),
+                       ssn_conf['service_base_name'], ssn_conf['tag_name'], False, True)
+            try:
+                local("~/scripts/{}.py {}".format('common_create_security_group', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            with open('/tmp/ssn_sg_id', 'r') as f:
+                os.environ['aws_security_groups_ids'] = f.read()
+        except Exception as err:
+            dlab.fab.append_result("Failed to create security group for SSN.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
+                try:
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                except:
+                    print("There are no VPC Endpoints")
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+            sys.exit(1)
+
+    try:
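+        # Create the IAM role, instance profile and policy for the SSN instance.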
         logging.info('[CREATE ROLES]')
         print('[CREATE ROLES]')
         params = "--role_name {} --role_profile_name {} --policy_name {} --policy_file_name {} --region {} " \
                  "--infra_tag_name {} --infra_tag_value {} --user_tag_value {}".\
-            format(role_name, role_profile_name, policy_name, policy_path, os.environ['aws_region'], tag_name,
-                   service_base_name, user_tag)
+            format(ssn_conf['role_name'], ssn_conf['role_profile_name'], ssn_conf['policy_name'],
+                   ssn_conf['policy_path'], os.environ['aws_region'], ssn_conf['tag_name'],
+                   ssn_conf['service_base_name'], ssn_conf['user_tag'])
         try:
             local("~/scripts/{}.py {}".format('common_create_role_policy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create roles.", str(err))
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
+        dlab.fab.append_result("Unable to create roles.", str(err))
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
             try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
             except:
                 print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
         sys.exit(1)
 
     try:
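+        # Create a VPC endpoint and route table for the primary (SSN) VPC.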
         logging.info('[CREATE ENDPOINT AND ROUTE-TABLE]')
         print('[CREATE ENDPOINT AND ROUTE-TABLE]')
         params = "--vpc_id {} --region {} --infra_tag_name {} --infra_tag_value {}".format(
-            os.environ['aws_vpc_id'], os.environ['aws_region'], tag_name, service_base_name)
+            os.environ['aws_vpc_id'], os.environ['aws_region'], ssn_conf['tag_name'], ssn_conf['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('ssn_create_endpoint', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create an endpoint.", str(err))
-        remove_all_iam_resources(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
+        dlab.fab.append_result("Unable to create an endpoint.", str(err))
+        dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
             try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
             except:
                 print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
         sys.exit(1)
 
     if os.environ['conf_duo_vpc_enable'] == 'true':
@@ -359,84 +371,44 @@
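+            # Duo-VPC mode also needs an endpoint and route table in the notebook VPC.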
             logging.info('[CREATE ENDPOINT AND ROUTE-TABLE FOR NOTEBOOK VPC]')
             print('[CREATE ENDPOINT AND ROUTE-TABLE FOR NOTEBOOK VPC]')
             params = "--vpc_id {} --region {} --infra_tag_name {} --infra_tag_value {}".format(
-                os.environ['aws_vpc2_id'], os.environ['aws_region'], tag2_name, service_base_name)
+                os.environ['aws_vpc2_id'], os.environ['aws_region'], ssn_conf['tag2_name'],
+                ssn_conf['service_base_name'])
             try:
                 local("~/scripts/{}.py {}".format('ssn_create_endpoint', params))
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Unable to create secondary endpoint.", str(err))
-            remove_all_iam_resources(instance)
-            if pre_defined_sg:
-                remove_sgroups(tag_name)
-            if pre_defined_subnet:
-                remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                remove_subnets(service_base_name + "-subnet")
-            if pre_defined_vpc:
-                remove_route_tables(tag_name, True)
-                remove_vpc(os.environ['aws_vpc_id'])
-            if pre_defined_vpc2:
-                remove_peering('*')
+            dlab.fab.append_result("Unable to create secondary endpoint.", str(err))
+            dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+            if ssn_conf['pre_defined_sg']:
+                dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+            if ssn_conf['pre_defined_subnet']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
                     print("There are no VPC Endpoints")
-                remove_route_tables(tag2_name, True)
-                remove_vpc(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
             sys.exit(1)
-    try:
-        logging.info('[CREATE BUCKETS]')
-        print('[CREATE BUCKETS]')
-        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {} --bucket_name_tag {}". \
-                 format(ssn_bucket_name, tag_name, ssn_bucket_name, region, ssn_bucket_name_tag)
-
-        try:
-            local("~/scripts/{}.py {}".format('common_create_bucket', params))
-        except:
-            traceback.print_exc()
-            raise Exception
-
-        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {} --bucket_name_tag {}". \
-                 format(shared_bucket_name, tag_name, shared_bucket_name, region, shared_bucket_name_tag)
-
-        try:
-            local("~/scripts/{}.py {}".format('common_create_bucket', params))
-        except:
-            traceback.print_exc()
-            raise Exception
-    except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create bucket.", str(err))
-        remove_all_iam_resources(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
-        sys.exit(1)
 
     try:
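+        # Launch the SSN EC2 instance with the role profile, security group and
+        # subnet prepared above and a 20 GB primary disk.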
         logging.info('[CREATE SSN INSTANCE]')
         print('[CREATE SSN INSTANCE]')
-        params = "--node_name {0} --ami_id {1} --instance_type {2} --key_name {3} --security_group_ids {4} --subnet_id {5} " \
-                 "--iam_profile {6} --infra_tag_name {7} --infra_tag_value {8} --instance_class {9} --primary_disk_size {10}".\
-            format(instance_name, ssn_ami_id, os.environ['aws_ssn_instance_size'], os.environ['conf_key_name'],
-                   os.environ['aws_security_groups_ids'], os.environ['aws_subnet_id'],
-                   role_profile_name, tag_name, instance_name, 'ssn', '20')
+        params = "--node_name {0} --ami_id {1} --instance_type {2} --key_name {3} --security_group_ids {4} " \
+                 "--subnet_id {5} --iam_profile {6} --infra_tag_name {7} --infra_tag_value {8} --instance_class {9} " \
+                 "--primary_disk_size {10}".\
+            format(ssn_conf['instance_name'], ssn_conf['ssn_ami_id'], os.environ['aws_ssn_instance_size'],
+                   os.environ['conf_key_name'], os.environ['aws_security_groups_ids'], os.environ['aws_subnet_id'],
+                   ssn_conf['role_profile_name'], ssn_conf['tag_name'], ssn_conf['instance_name'], 'ssn', '20')
 
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
@@ -444,107 +416,112 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create ssn instance.", str(err))
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
+        dlab.fab.append_result("Unable to create ssn instance.", str(err))
+        dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+        dlab.actions_lib.remove_s3(ssn_conf['instance'])
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
             try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
             except:
                 print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
         sys.exit(1)
 
-    if network_type == 'public':
+    if ssn_conf['network_type'] == 'public':
         try:
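+            # Attach an Elastic IP to the SSN instance; a pre-allocated address
+            # can be supplied via ssn_elastic_ip, otherwise 'None' is passed for
+            # the association script to handle.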
             logging.info('[ASSOCIATING ELASTIC IP]')
             print('[ASSOCIATING ELASTIC IP]')
-            ssn_id = get_instance_by_name(tag_name, instance_name)
+            ssn_conf['ssn_id'] = dlab.meta_lib.get_instance_by_name(ssn_conf['tag_name'], ssn_conf['instance_name'])
             try:
-                elastic_ip = os.environ['ssn_elastic_ip']
+                ssn_conf['elastic_ip'] = os.environ['ssn_elastic_ip']
             except:
-                elastic_ip = 'None'
+                ssn_conf['elastic_ip'] = 'None'
             params = "--elastic_ip {} --ssn_id {} --infra_tag_name {} --infra_tag_value {}".format(
-                elastic_ip, ssn_id, tag_name, elastic_ip_name)
+                ssn_conf['elastic_ip'], ssn_conf['ssn_id'], ssn_conf['tag_name'], ssn_conf['elastic_ip_name'])
             try:
                 local("~/scripts/{}.py {}".format('ssn_associate_elastic_ip', params))
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to associate elastic ip.", str(err))
-            remove_ec2(tag_name, instance_name)
-            remove_all_iam_resources(instance)
-            remove_s3(instance)
-            if pre_defined_sg:
-                remove_sgroups(tag_name)
-            if pre_defined_subnet:
-                remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                remove_subnets(service_base_name + "-subnet")
-            if pre_defined_vpc:
-                remove_vpc_endpoints(os.environ['aws_vpc_id'])
-                remove_route_tables(tag_name, True)
-                remove_vpc(os.environ['aws_vpc_id'])
-            if pre_defined_vpc2:
-                remove_peering('*')
+            dlab.fab.append_result("Failed to associate elastic ip.", str(err))
+            dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+            dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+            dlab.actions_lib.remove_s3(ssn_conf['instance'])
+            if ssn_conf['pre_defined_sg']:
+                dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+            if ssn_conf['pre_defined_subnet']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
                     print("There are no VPC Endpoints")
-                remove_route_tables(tag2_name, True)
-                remove_vpc(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
             sys.exit(1)
 
-    if network_type == 'private':
-        instance_ip = get_instance_ip_address(tag_name, instance_name).get('Private')
+    if ssn_conf['network_type'] == 'private':
+        ssn_conf['instance_ip'] = dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+                                                                        ssn_conf['instance_name']).get('Private')
     else:
-        instance_ip = get_instance_ip_address(tag_name, instance_name).get('Public')
+        ssn_conf['instance_ip'] = dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+                                                                        ssn_conf['instance_name']).get('Public')
 
     if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and 'ssn_subdomain' in os.environ:
         try:
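+            # Register the SSN IP in Route53 when hosted-zone settings are present.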
             logging.info('[CREATING ROUTE53 RECORD]')
             print('[CREATING ROUTE53 RECORD]')
             try:
-                create_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                       os.environ['ssn_subdomain'], instance_ip)
+                dlab.actions_lib.create_route_53_record(os.environ['ssn_hosted_zone_id'],
+                                                        os.environ['ssn_hosted_zone_name'],
+                                                        os.environ['ssn_subdomain'], ssn_conf['instance_ip'])
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            append_result("Failed to create route53 record.", str(err))
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
+            dlab.fab.append_result("Failed to create route53 record.", str(err))
+            dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'],
+                                                    os.environ['ssn_hosted_zone_name'],
                                    os.environ['ssn_subdomain'])
-            remove_ec2(tag_name, instance_name)
-            remove_all_iam_resources(instance)
-            remove_s3(instance)
-            if pre_defined_sg:
-                remove_sgroups(tag_name)
-            if pre_defined_subnet:
-                remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                remove_subnets(service_base_name + "-subnet")
-            if pre_defined_vpc:
-                remove_vpc_endpoints(os.environ['aws_vpc_id'])
-                remove_route_tables(tag_name, True)
-                remove_vpc(os.environ['aws_vpc_id'])
-            if pre_defined_vpc2:
-                remove_peering('*')
+            dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+            dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+            dlab.actions_lib.remove_s3(ssn_conf['instance'])
+            if ssn_conf['pre_defined_sg']:
+                dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+            if ssn_conf['pre_defined_subnet']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
                     print("There are no VPC Endpoints")
-                remove_route_tables(tag2_name, True)
-                remove_vpc(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
             sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
index 76a119d..975e8d3 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
@@ -21,11 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import sys
+import os
+import logging
+import traceback
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import json
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -35,17 +40,17 @@
                         filename=local_log_filepath)
     # generating variables dictionary
     if 'aws_access_key' in os.environ and 'aws_secret_access_key' in os.environ:
-        create_aws_config_files(generate_full_config=True)
+        dlab.actions_lib.create_aws_config_files(generate_full_config=True)
     else:
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     ssn_conf = dict()
-    ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    ssn_conf['tag_name'] = ssn_conf['service_base_name'] + '-Tag'
+    ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+    ssn_conf['tag_name'] = ssn_conf['service_base_name'] + '-tag'
     ssn_conf['edge_sg'] = ssn_conf['service_base_name'] + "*" + '-edge'
     ssn_conf['nb_sg'] = ssn_conf['service_base_name'] + "*" + '-nb'
-    ssn_conf['de_sg'] = ssn_conf['service_base_name'] + "*" + '-dataengine*'
+    ssn_conf['de_sg'] = ssn_conf['service_base_name'] + "*" + '-de*'
     ssn_conf['de-service_sg'] = ssn_conf['service_base_name'] + "*" + '-des-*'
 
     try:
@@ -61,7 +66,7 @@
             raise Exception
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to terminate ssn.", str(err))
+        dlab.fab.append_result("Failed to terminate ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -70,6 +75,6 @@
                    "Action": "Terminate ssn with all service_base_name environment"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
index 7aa6629..27b5913 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
@@ -21,12 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
 import boto3
 import argparse
 import sys
-from dlab.ssn_lib import *
 import os
 
 parser = argparse.ArgumentParser()
@@ -37,7 +38,7 @@
 parser.add_argument('--service_base_name', type=str)
 parser.add_argument('--de_se_sg', type=str)
 args = parser.parse_args()
-tag2 = args.service_base_name + '-secondary-Tag'
+tag2 = args.service_base_name + '-secondary-tag'
 
 ##############
 # Run script #
@@ -46,120 +47,129 @@
 if __name__ == "__main__":
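+    # Tear-down order matters: clusters, images and instances go first, then
+    # network and storage resources, with the VPCs removed last.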
     print('Terminating EMR cluster')
     try:
-        clusters_list = get_emr_list(args.tag_name)
+        clusters_list = dlab.meta_lib.get_emr_list(args.tag_name)
         if clusters_list:
             for cluster_id in clusters_list:
                 client = boto3.client('emr')
                 cluster = client.describe_cluster(ClusterId=cluster_id)
                 cluster = cluster.get("Cluster")
                 emr_name = cluster.get('Name')
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
         sys.exit(1)
 
     print("Deregistering notebook's AMI")
     try:
-        deregister_image()
-    except:
+        dlab.actions_lib.deregister_image()
+    except Exception as err:
+        dlab.fab.append_result("Failed to deregister images.", str(err))
         sys.exit(1)
 
     print("Terminating EC2 instances")
     try:
-        remove_ec2(args.tag_name, '*')
-    except:
+        dlab.actions_lib.remove_ec2(args.tag_name, '*')
+    except Exception as err:
+        dlab.fab.append_result("Failed to terminate instances.", str(err))
         sys.exit(1)
 
     if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and 'ssn_subdomain' in os.environ:
         print("Removing Route53 records")
-        remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                               os.environ['ssn_subdomain'])
+        dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
+                                                os.environ['ssn_subdomain'])
 
     print("Removing security groups")
     try:
-        remove_sgroups(args.de_se_sg)
-        remove_sgroups(args.de_sg)
-        remove_sgroups(args.nb_sg)
-        remove_sgroups(args.edge_sg)
+        dlab.actions_lib.remove_sgroups(args.de_se_sg)
+        dlab.actions_lib.remove_sgroups(args.de_sg)
+        dlab.actions_lib.remove_sgroups(args.nb_sg)
+        dlab.actions_lib.remove_sgroups(args.edge_sg)
         try:
-            remove_sgroups(args.tag_name)
+            dlab.actions_lib.remove_sgroups(args.tag_name)
         except:
             print("There is no pre-defined SSN SG")
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove security groups.", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        remove_subnets('*')
-    except:
+        dlab.actions_lib.remove_subnets('*')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove subnets.", str(err))
         sys.exit(1)
 
     print("Removing peering connection")
     try:
-        remove_peering('*')
-    except:
+        dlab.actions_lib.remove_peering('*')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove peering connections.", str(err))
         sys.exit(1)
 
     print("Removing s3 buckets")
     try:
-        remove_s3()
-    except:
+        dlab.actions_lib.remove_s3()
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove buckets.", str(err))
         sys.exit(1)
 
     print("Removing IAM roles, profiles and policies")
     try:
-        remove_all_iam_resources('all')
-    except:
+        dlab.actions_lib.remove_all_iam_resources('all')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove IAM roles, profiles and policies.", str(err))
         sys.exit(1)
 
     print("Removing route tables")
     try:
-        remove_route_tables(args.tag_name)
-        remove_route_tables(tag2)
-    except:
+        dlab.actions_lib.remove_route_tables(args.tag_name)
+        dlab.actions_lib.remove_route_tables(tag2)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove route tables.", str(err))
         sys.exit(1)
 
     print("Removing SSN subnet")
     try:
-        remove_subnets(args.service_base_name + '-subnet')
-    except:
-        print("There is no pre-defined SSN Subnet")
+        dlab.actions_lib.remove_subnets(args.service_base_name + '-subnet')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove SSN subnet.", str(err))
+        sys.exit(1)
 
     print("Removing SSN VPC")
     try:
-        vpc_id = get_vpc_by_tag(args.tag_name, args.service_base_name)
+        vpc_id = dlab.meta_lib.get_vpc_by_tag(args.tag_name, args.service_base_name)
         if vpc_id != '':
             try:
-                remove_vpc_endpoints(vpc_id)
+                dlab.actions_lib.remove_vpc_endpoints(vpc_id)
             except:
                 print("There is no such VPC Endpoint")
             try:
-                remove_internet_gateways(vpc_id, args.tag_name, args.service_base_name)
+                dlab.actions_lib.remove_internet_gateways(vpc_id, args.tag_name, args.service_base_name)
             except:
                 print("There is no such Internet gateway")
-            remove_route_tables(args.tag_name, True)
-            remove_vpc(vpc_id)
+            dlab.actions_lib.remove_route_tables(args.tag_name, True)
+            dlab.actions_lib.remove_vpc(vpc_id)
         else:
             print("There is no pre-defined SSN VPC")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove SSN VPC.", str(err))
         sys.exit(1)
 
     print("Removing notebook VPC")
     try:
-        vpc_id = get_vpc_by_tag(tag2, args.service_base_name)
+        vpc_id = dlab.meta_lib.get_vpc_by_tag(tag2, args.service_base_name)
         if vpc_id != '':
             try:
-                remove_vpc_endpoints(vpc_id)
+                dlab.actions_lib.remove_vpc_endpoints(vpc_id)
             except:
                 print("There is no such VPC Endpoint")
-            remove_route_tables(tag2, True)
-            remove_vpc(vpc_id)
+            dlab.actions_lib.remove_route_tables(tag2, True)
+            dlab.actions_lib.remove_vpc(vpc_id)
         else:
             print("There is no pre-defined notebook VPC")
     except Exception as err:
-        print('Error: {0}'.format(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to remove wecondary VPC.", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
index d1b42f9..6baaf45 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -44,74 +46,78 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'],
-                                                            notebook_config['instance_name']).get('Private')
-    tag = {"Key": notebook_config['tag_name'],
-           "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
-    notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
-    notebook_config['rstudio_pass'] = id_generator()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = edge_instance_hostname
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
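+        # Generate the RStudio password for this notebook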
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
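+        # Pick the initial login user and sudo group for the configured OS family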
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -119,9 +125,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -138,9 +143,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -156,9 +160,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and RSTUDIO and all dependencies
@@ -168,7 +171,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
             .format(instance_hostname, keyfile_name,
                     os.environ['aws_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -180,9 +183,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure tensoflow-rstudio.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure tensoflow-rstudio.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -198,9 +200,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -211,12 +212,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -232,9 +232,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -245,110 +244,121 @@
             'tensor': True
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio', notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
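+        # Optionally bake an AMI from the configured notebook instance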
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
+                print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    rstudio_dns_url = "http://" + dns_name + ":8787/"
-    rstudio_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_acces_url = "http://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio URL: {}".format(rstudio_dns_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        rstudio_dns_url = "http://" + dns_name + ":8787/"
+        rstudio_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio URL: {}".format(rstudio_dns_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "RStudio",
-                    "url": rstudio_notebook_acces_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_acces_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_acces_url}#,
-                   #{"description": "RStudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
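+        # Write the provisioning summary to /root/result.json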
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "RStudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "RStudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
index 365f014..3cf3a46 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -44,72 +46,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    tag = {"Key": notebook_config['tag_name'],
-           "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
-    notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except KeyError:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
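+        # Shared images are endpoint-scoped; project-specific images also carry the project name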
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = edge_instance_hostname
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
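+        # Pick the initial login user and sudo group for the configured OS family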
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -117,9 +124,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -136,9 +142,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -154,9 +159,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and all dependencies
@@ -165,7 +169,7 @@
         print('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
         params = "--hostname {0} --keyfile {1} " \
                  "--region {2} --os_user {3} " \
-                 "--ip_adress {4} --exploratory_name {5} --edge_ip {6}" \
+                 "--ip_address {4} --exploratory_name {5} --edge_ip {6}" \
                  .format(instance_hostname, keyfile_name,
                          os.environ['aws_region'], notebook_config['dlab_ssh_user'],
                          notebook_config['ip_address'], notebook_config['exploratory_name'], edge_ip)
@@ -175,9 +179,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure TensorFlow.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -193,9 +196,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -206,12 +208,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -227,9 +228,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -240,104 +240,114 @@
             'tensor': True
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'jupyter',notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'jupyter',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
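+        # Optionally bake an AMI from the configured notebook instance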
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
+                print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_acces_url = "http://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
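+        # Write the provisioning summary to /root/result.json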
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
index 8853f9d..dbdae70 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -44,77 +46,83 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-
-    region = os.environ['aws_region']
-    if region == 'us-east-1':
-        endpoint_url = 'https://s3.amazonaws.com'
-    elif region == 'cn-north-1':
-        endpoint_url = "https://s3.{}.amazonaws.com.cn".format(region)
-    else:
-        endpoint_url = 'https://s3-{}.amazonaws.com'.format(region)
-
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except KeyError:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
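+        # Shared images are endpoint-scoped; project-specific images also carry the project name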
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config['region'] = os.environ['aws_region']
+        if notebook_config['region'] == 'us-east-1':
+            notebook_config['endpoint_url'] = 'https://s3.amazonaws.com'
+        elif notebook_config['region'] == 'cn-north-1':
+            notebook_config['endpoint_url'] = "https://s3.{}.amazonaws.com.cn".format(notebook_config['region'])
+        else:
+            notebook_config['endpoint_url'] = 'https://s3-{}.amazonaws.com'.format(notebook_config['region'])
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -122,9 +130,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -141,9 +148,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -160,8 +166,8 @@
             raise Exception
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring zeppelin and all dependencies
@@ -169,7 +175,8 @@
         logging.info('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
         print('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
         additional_config = {"frontend_hostname": edge_instance_hostname,
-                             "backend_hostname": get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name']),
+                             "backend_hostname": dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                     notebook_config['instance_name']),
                              "backend_port": "8080",
                              "nginx_template_dir": "/root/templates/"}
         params = "--hostname {0} --instance_name {1} " \
@@ -180,13 +187,13 @@
                  "--zeppelin_version {10} --scala_version {11} " \
                  "--livy_version {12} --multiple_clusters {13} " \
                  "--r_mirror {14} --endpoint_url {15} " \
-                 "--ip_adress {16} --exploratory_name {17} --edge_ip {18}" \
+                 "--ip_address {16} --exploratory_name {17} --edge_ip {18}" \
             .format(instance_hostname, notebook_config['instance_name'], keyfile_name, os.environ['aws_region'],
                     json.dumps(additional_config), notebook_config['dlab_ssh_user'], os.environ['notebook_spark_version'],
                     os.environ['notebook_hadoop_version'], edge_instance_hostname, '3128',
                     os.environ['notebook_zeppelin_version'], os.environ['notebook_scala_version'],
                     os.environ['notebook_livy_version'], os.environ['notebook_multiple_clusters'],
-                    os.environ['notebook_r_mirror'], endpoint_url, notebook_config['ip_address'],
+                    os.environ['notebook_r_mirror'], notebook_config['endpoint_url'], notebook_config['ip_address'],
                     notebook_config['exploratory_name'], edge_ip)
         try:
             local("~/scripts/{}.py {}".format('configure_zeppelin_node', params))
@@ -194,9 +201,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure zeppelin.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -212,9 +218,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -225,12 +230,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
     
     try:
@@ -246,9 +250,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -259,99 +262,106 @@
             'tensor': False
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'zeppelin', notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'zeppelin',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
+                print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        zeppelin_ip_url = "http://" + ip_address + ":8080/"
+        zeppelin_dns_url = "http://" + dns_name + ":8080/"
+        zeppelin_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                               notebook_config['exploratory_name'])
+        zeppelin_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                  notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Zeppelin URL: {}".format(zeppelin_ip_url))
+        print("Zeppelin URL: {}".format(zeppelin_dns_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    zeppelin_ip_url = "http://" + ip_address + ":8080/"
-    zeppelin_dns_url = "http://" + dns_name + ":8080/"
-    zeppelin_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    zeppelin_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("Zeppelin URL: {}".format(zeppelin_ip_url))
-    print("Zeppelin URL: {}".format(zeppelin_dns_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Apache Zeppelin",
-                    "url": zeppelin_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": zeppelin_ungit_acces_url}#,
-                   #{"description": "Apache Zeppelin (via tunnel)",
-                   # "url": zeppelin_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Apache Zeppelin",
+                        "url": zeppelin_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": zeppelin_ungit_access_url}#,
+                       #{"description": "Apache Zeppelin (via tunnel)",
+                       # "url": zeppelin_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
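
The region branching added in this file maps an AWS region to its S3 endpoint URL. A minimal standalone sketch of the same mapping (the helper name and the assert are illustrative, not part of this patch):

    def get_s3_endpoint_url(region):
        # Mirror the branching in zeppelin_configure.py: special-case the
        # us-east-1 global endpoint and the China partition, default otherwise.
        if region == 'us-east-1':
            return 'https://s3.amazonaws.com'
        if region == 'cn-north-1':
            return 'https://s3.{}.amazonaws.com.cn'.format(region)
        return 'https://s3-{}.amazonaws.com'.format(region)

    assert get_s3_endpoint_url('eu-west-1') == 'https://s3-eu-west-1.amazonaws.com'
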
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py b/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
index cb7073a..295e191 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
@@ -82,7 +82,7 @@
                                                                             args.security_group_name,
                                                                             json.loads(args.tags),
                                                                             args.public_ip_name)
-                disk = AzureMeta().get_disk(args.resource_group_name, '{}-disk0'.format(
+                disk = AzureMeta().get_disk(args.resource_group_name, '{}-volume-primary'.format(
                     args.instance_name))
                 if disk:
                     create_option = 'attach'
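
The disk lookup above now uses the <instance>-volume-primary naming. The attach-or-create decision it drives reduces to a small predicate; a sketch under the assumption that the non-attach default is Azure's 'fromImage' create option (get_disk stands in for AzureMeta().get_disk and is passed in here):

    def primary_disk_create_option(get_disk, resource_group_name, instance_name):
        # Reuse a surviving primary volume when one exists; otherwise fall back
        # to creating the disk from the image ('fromImage' is an assumption).
        disk = get_disk(resource_group_name, '{}-volume-primary'.format(instance_name))
        return 'attach' if disk else 'fromImage'

    # hypothetical usage with a stub lookup that finds no disk:
    assert primary_disk_create_option(lambda rg, name: None, 'rg', 'vm1') == 'fromImage'
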
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py b/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
index 73c17c3..5746fb8 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
@@ -21,76 +21,82 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import traceback
 import sys
 import json
+from fabric.api import *
 
 
 if __name__ == "__main__":
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         image_conf = dict()
         image_conf['service_base_name'] = os.environ['conf_service_base_name']
         image_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-        image_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        image_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        image_conf['project_tag'] = os.environ['project_name'].replace('_', '-')
-        image_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-')
-        image_conf['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
+        image_conf['user_name'] = os.environ['edge_user_name']
+        image_conf['project_name'] = os.environ['project_name']
+        image_conf['project_tag'] = image_conf['project_name']
+        image_conf['endpoint_name'] = os.environ['endpoint_name']
+        image_conf['endpoint_tag'] = image_conf['endpoint_name']
         image_conf['instance_name'] = os.environ['notebook_instance_name']
         image_conf['application'] = os.environ['application']
         image_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-        image_conf['image_name'] = os.environ['notebook_image_name'].lower().replace('_', '-')
-        image_conf['full_image_name'] = '{}-{}-{}-{}'.format(image_conf['service_base_name'],
-                                                             image_conf['project_name'],
-                                                             image_conf['application'],
-                                                             image_conf['image_name']).lower()
-        image_conf['tags'] = {"Name": image_conf['service_base_name'],
+        image_conf['image_name'] = os.environ['notebook_image_name']
+        image_conf['full_image_name'] = '{}-{}-{}-{}-{}'.format(image_conf['service_base_name'],
+                                                                image_conf['project_name'],
+                                                                image_conf['endpoint_name'],
+                                                                image_conf['application'],
+                                                                image_conf['image_name'])
+        image_conf['tags'] = {"Instance_Name": image_conf['instance_name'],
                               "SBN": image_conf['service_base_name'],
                               "User": image_conf['user_name'],
                               "project_tag": image_conf['project_tag'],
                               "endpoint_tag": image_conf['endpoint_tag'],
                               "Image": image_conf['image_name'],
-                              "FIN": image_conf['full_image_name'],
+                              "Name": image_conf['full_image_name'],
                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
 
-        instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
-                                                               image_conf['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
+                                                             image_conf['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(image_conf['service_base_name'],
                                                        image_conf['project_name'],
                                                        image_conf['endpoint_name'])
-        edge_instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
+        edge_instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
                                                                     edge_instance_name)
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
 
-        instance = AzureMeta().get_instance(image_conf['resource_group_name'], image_conf['instance_name'])
+        instance = AzureMeta.get_instance(image_conf['resource_group_name'], image_conf['instance_name'])
         os.environ['azure_notebook_instance_size'] = instance.hardware_profile.vm_size
         os.environ['exploratory_name'] = instance.tags['Exploratory']
-        os.environ['notebook_image_name'] = image_conf['full_image_name']
+        os.environ['notebook_image_name'] = image_conf['image_name']
 
-        image = AzureMeta().get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+        image = AzureMeta.get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
         if image == '':
             print('Creating image from existing notebook.')
-            prepare_vm_for_image(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
-            AzureActions().create_image_from_instance(image_conf['resource_group_name'],
-                                                      image_conf['instance_name'],
-                                                      os.environ['azure_region'],
-                                                      image_conf['full_image_name'],
-                                                      json.dumps(image_conf['tags']))
+            dlab.actions_lib.prepare_vm_for_image(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
+            AzureActions.create_image_from_instance(image_conf['resource_group_name'],
+                                                    image_conf['instance_name'],
+                                                    os.environ['azure_region'],
+                                                    image_conf['full_image_name'],
+                                                    json.dumps(image_conf['tags']))
             print("Image was successfully created.")
             try:
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(image_conf['resource_group_name'],
-                                                       image_conf['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(image_conf['resource_group_name'],
+                                                     image_conf['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
-                                                                       image_conf['instance_name'])
-                remount_azure_disk(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(image_conf['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
+                                                                     image_conf['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
+                dlab.fab.set_git_proxy(image_conf['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_hostname))
                 additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, image_conf['instance_name'], keyfile_name,
@@ -98,9 +104,8 @@
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
                 print("Image was successfully created. It's name is {}".format(image_conf['full_image_name']))
             except Exception as err:
-                print('Error: {0}'.format(err))
-                AzureActions().remove_instance(image_conf['resource_group_name'], image_conf['instance_name'])
-                append_result("Failed to create instance from image.", str(err))
+                AzureActions.remove_instance(image_conf['resource_group_name'], image_conf['instance_name'])
+                dlab.fab.append_result("Failed to create instance from image.", str(err))
                 sys.exit(1)
 
             with open("/root/result.json", 'w') as result:
@@ -114,6 +119,5 @@
                        "Action": "Create image from notebook"}
                 result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create image from notebook", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create image from notebook", str(err))
+        sys.exit(1)
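
The image-recreation path in this file waits for the replacement VM with a tight while-not-running loop around get_instance_status. A hedged sketch of the same wait with a pause between polls and a timeout added (the helper and its parameters are illustrative; get_status stands in for AzureMeta.get_instance_status):

    import time

    def wait_for_instance_status(get_status, resource_group_name, instance_name,
                                 expected='running', poll_seconds=10,
                                 timeout_seconds=900):
        # Poll until the instance reports the expected status; sleep between
        # calls so the Azure API is not hammered, and give up after the timeout.
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            if get_status(resource_group_name, instance_name) == expected:
                return True
            time.sleep(poll_seconds)
        return False
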
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py b/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
index cfe37fc..2a9e606 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
@@ -35,9 +35,9 @@
 args = parser.parse_args()
 
 resource_group_name = os.environ['azure_resource_group_name']
-ssn_storage_account_tag = ('{0}-{1}-{2}-storage'.format(os.environ['conf_service_base_name'], os.environ['project_name'],
-                                                        os.environ['endpoint_name']))
-container_name = ('{}-ssn-container'.format(os.environ['conf_service_base_name'])).lower().replace('_', '-')
+ssn_storage_account_tag = ('{0}-{1}-{2}-bucket'.format(os.environ['conf_service_base_name'], os.environ['project_name'],
+                                                       os.environ['endpoint_name']))
+container_name = ('{}-ssn-bucket'.format(os.environ['conf_service_base_name'])).lower().replace('_', '-')
 gitlab_certfile = os.environ['conf_gitlab_certfile']
 
 if __name__ == "__main__":
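
The container name above is lower-cased with underscores replaced because Azure blob container names only allow lowercase letters, digits, and hyphens. The same normalization as a tiny standalone helper (the function name and the assert are illustrative):

    def azure_container_name(service_base_name, suffix='ssn-bucket'):
        # Azure container naming rules: lowercase, hyphens instead of underscores.
        return '{}-{}'.format(service_base_name, suffix).lower().replace('_', '-')

    assert azure_container_name('My_SBN') == 'my-sbn-ssn-bucket'
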
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
index fc3e56f..2e697eb 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
+from fabric.api import *
+import traceback
+
+
+def clear_resources():
+    for i in range(notebook_config['instance_count'] - 1):
+        slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+        AzureActions.remove_instance(notebook_config['resource_group_name'], slave_name)
+    AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
 
 
 if __name__ == "__main__":
@@ -41,48 +50,50 @@
 
     try:
         # generating variables dictionary
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         print('Generating infrastructure names and tags')
         notebook_config = dict()
-        try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        else:
             notebook_config['exploratory_name'] = ''
-        try:
-            notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            notebook_config['computational_name'] = os.environ['computational_name']
+        else:
             notebook_config['computational_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['region'] = os.environ['azure_region']
-        notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
-        notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
-                                          '-de-' + notebook_config['exploratory_name'] + '-' + \
-                                          notebook_config['computational_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['cluster_name'] = '{}-{}-{}-de-{}'.format(notebook_config['service_base_name'],
+                                                                  notebook_config['project_name'],
+                                                                  notebook_config['endpoint_name'],
+                                                                  notebook_config['computational_name'])
         notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
         notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-        notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+        notebook_config['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
         try:
-            notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
+            notebook_config['spark_master_ip'] = AzureMeta.get_private_ip_address(
                 notebook_config['resource_group_name'], notebook_config['master_node_name'])
-            notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+            notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
                 notebook_config['resource_group_name'], notebook_config['notebook_name'])
         except Exception as err:
-            print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to get instance IP address", str(err))
+            clear_resources()
             sys.exit(1)
         notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
 
     except Exception as err:
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
-        append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
         sys.exit(1)
 
     try:
@@ -100,12 +111,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
-        append_result("Failed installing Dataengine kernels.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -125,12 +132,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
-        append_result("Failed to configure Spark.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
         sys.exit(1)
 
     try:
@@ -139,6 +142,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
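
The refactoring in this file replaces three copies of the slave/master teardown with a single clear_resources() helper so every failure path cleans up identically. A standalone sketch of the same idea, taking its inputs as parameters instead of relying on module globals (names are illustrative; remove_instance stands in for AzureActions.remove_instance):

    def clear_cluster(remove_instance, resource_group_name, slave_node_name,
                      master_node_name, instance_count):
        # Tear down the slave nodes first (slave names carry suffixes 1..N-1),
        # then the master, mirroring clear_resources() above.
        for i in range(instance_count - 1):
            remove_instance(resource_group_name,
                            '{}{}'.format(slave_node_name, i + 1))
        remove_instance(resource_group_name, master_node_name)

    # hypothetical usage with a stub that records removals:
    removed = []
    clear_cluster(lambda rg, name: removed.append(name), 'rg', 'demo-de-s',
                  'demo-de-m', 3)
    assert removed == ['demo-de-s1', 'demo-de-s2', 'demo-de-m']
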
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
index b7e493c..a4dda9d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import os
 from Crypto.PublicKey import RSA
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -42,17 +44,19 @@
 
     # generating variables dictionary
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['application'] = os.environ['application'].lower().replace('_', '-')
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['application'] = os.environ['application'].lower()
         
         print('Generating infrastructure names and tags')
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
@@ -61,9 +65,10 @@
         notebook_config['vpc_name'] = os.environ['azure_vpc_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['instance_name'] = '{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'],
-                                                                notebook_config['exploratory_name'])
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
                                    "User": notebook_config['user_name'],
@@ -72,10 +77,12 @@
                                    "Exploratory": notebook_config['exploratory_name'],
                                    "product": "dlab"}
         notebook_config['network_interface_name'] = notebook_config['instance_name'] + "-nif"
-        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'])
-        notebook_config['private_subnet_name'] = '{}-{}-subnet'.format(notebook_config['service_base_name'],
-                                                                       notebook_config['project_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['private_subnet_name'] = '{}-{}-{}-subnet'.format(notebook_config['service_base_name'],
+                                                                          notebook_config['project_name'],
+                                                                          notebook_config['endpoint_name'])
         ssh_key_path = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         key = RSA.importKey(open(ssh_key_path, 'rb').read())
         notebook_config['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
@@ -83,33 +90,32 @@
         notebook_config['instance_storage_account_type'] = (lambda x: 'Standard_LRS' if x in ('deeplearning', 'tensor')
                                                             else 'Premium_LRS')(os.environ['application'])
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
         notebook_config['image_type'] = 'default'
 
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            notebook_config['endpoint_name'],
-            notebook_config['project_name'],
-            notebook_config['application'])
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                notebook_config['application'])
         else:
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            notebook_config['endpoint_name'],
-            notebook_config['application'])
-        notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}'.format(notebook_config['service_base_name'],
-                                                                                 os.environ['project_name'],
-                                                                                 os.environ['application'],
-                                                                                 os.environ['notebook_image_name']).lower().replace('_', '-') if (x != 'None' and x != '')
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                notebook_config['application'])
+        notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}-{4}'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+            os.environ['application'], os.environ['notebook_image_name']).replace('_', '-') if (x != 'None' and x != '')
             else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
         print('Searching pre-configured images')
         notebook_config['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
-        if AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['notebook_image_name']):
+        if AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['notebook_image_name']):
             notebook_config['image_name'] = notebook_config['notebook_image_name']
             notebook_config['image_type'] = 'pre-configured'
             print('Pre-configured image found. Using: {}'.format(notebook_config['notebook_image_name']))
@@ -118,27 +124,26 @@
             print('No pre-configured image found. Using default one: {}'.format(notebook_config['image_name']))
     except Exception as err:
         print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
-        edge_status = AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                      '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
-                                                                                notebook_config['project_name'],
-                                                                                notebook_config['endpoint_name']))
+        edge_status = AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                    '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
+                                                                              notebook_config['project_name'],
+                                                                              notebook_config['endpoint_name']))
 
         if edge_status != 'running':
             logging.info('ERROR: Edge node is unavailable! Aborting...')
             print('ERROR: Edge node is unavailable! Aborting...')
-            ssn_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
+            ssn_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
                                                               os.environ['conf_service_base_name'] + '-ssn')
-            put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                                ssn_hostname)
-            append_result("Edge node is unavailable")
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
             sys.exit(1)
     except Exception as err:
-        print("Failed to verify edge status.")
-        append_result("Failed to verify edge status.", str(err))
+        dlab.fab.append_result("Failed to verify edge status.", str(err))
         sys.exit(1)
 
     with open('/root/result.json', 'w') as f:
@@ -157,20 +162,20 @@
             format(notebook_config['instance_name'], notebook_config['instance_size'], notebook_config['region'],
                    notebook_config['vpc_name'], notebook_config['network_interface_name'],
                    notebook_config['security_group_name'], notebook_config['private_subnet_name'],
-                   notebook_config['service_base_name'], notebook_config['resource_group_name'], initial_user,
-                   'None', notebook_config['public_ssh_key'], notebook_config['primary_disk_size'], 'notebook',
-                   notebook_config['project_name'], notebook_config['instance_storage_account_type'],
-                   notebook_config['image_name'], notebook_config['image_type'], json.dumps(notebook_config['tags']))
+                   notebook_config['service_base_name'], notebook_config['resource_group_name'],
+                   notebook_config['initial_user'], 'None', notebook_config['public_ssh_key'],
+                   notebook_config['primary_disk_size'], 'notebook', notebook_config['project_name'],
+                   notebook_config['instance_storage_account_type'], notebook_config['image_name'],
+                   notebook_config['image_type'], json.dumps(notebook_config['tags']))
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         except:
             print("The instance hasn't been created.")
-        append_result("Failed to create instance.", str(err))
+        dlab.fab.append_result("Failed to create instance.", str(err))
         sys.exit(1)
\ No newline at end of file
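
The immediately-invoked lambda that resolves notebook_image_name in this file is compact but hard to follow. A behavior-equivalent sketch written as a plain function (an illustrative rewrite, not part of this patch):

    def resolve_notebook_image_name(service_base_name, project_name, endpoint_name,
                                    application, requested_image,
                                    expected_image_name):
        # A requested custom image wins; otherwise fall back to the expected
        # default image name computed earlier in the script.
        requested_image = str(requested_image)
        if requested_image not in ('None', ''):
            return '{0}-{1}-{2}-{3}-{4}'.format(
                service_base_name, project_name, endpoint_name, application,
                requested_image).replace('_', '-')
        return expected_image_name

    assert resolve_notebook_image_name(
        'sbn', 'prj', 'ep', 'zeppelin', None,
        'sbn-prj-ep-zeppelin-notebook-image') == 'sbn-prj-ep-zeppelin-notebook-image'
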
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
index 5dcbf3e..ab3c080 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 import argparse
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,6 +42,8 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
     notebook_config['service_base_name'] = os.environ['conf_service_base_name']
@@ -51,37 +55,37 @@
         print('[START NOTEBOOK]')
         try:
             print("Starting notebook")
-            AzureActions().start_instance(notebook_config['resource_group_name'], notebook_config['notebook_name'])
+            AzureActions.start_instance(notebook_config['resource_group_name'], notebook_config['notebook_name'])
             print("Instance {} has been started".format(notebook_config['notebook_name']))
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to start notebook.", str(err))
             raise Exception
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to start notebook.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[SETUP USER GIT CREDENTIALS]')
         print('[SETUP USER GIT CREDENTIALS]')
-        notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+        notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
             notebook_config['resource_group_name'], notebook_config['notebook_name'])
         notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to setup git credentials.", str(err))
             raise Exception
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
         sys.exit(1)
 
     if os.environ['azure_datalake_enable'] == 'true':
         try:
             logging.info('[UPDATE STORAGE CREDENTIALS]')
             print('[UPDATE STORAGE CREDENTIALS]')
-            notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+            notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
                 notebook_config['resource_group_name'], notebook_config['notebook_name'])
             env.hosts = "{}".format(notebook_config['notebook_ip'])
             env.user = os.environ['conf_os_user']
@@ -90,13 +94,14 @@
             params = '--refresh_token {}'.format(os.environ['azure_user_refresh_token'])
             try:
                 put('~/scripts/common_notebook_update_refresh_token.py', '/tmp/common_notebook_update_refresh_token.py')
-                sudo('mv /tmp/common_notebook_update_refresh_token.py /usr/local/bin/common_notebook_update_refresh_token.py')
+                sudo('mv /tmp/common_notebook_update_refresh_token.py '
+                     '/usr/local/bin/common_notebook_update_refresh_token.py')
                 sudo("/usr/bin/python /usr/local/bin/{}.py {}".format('common_notebook_update_refresh_token', params))
-            except Exception as err:
+            except:
                 traceback.print_exc()
-                append_result("Failed to update storage credentials.", str(err))
                 raise Exception
-        except:
+        except Exception as err:
+            dlab.fab.append_result("Failed to update storage credentials.", str(err))
             sys.exit(1)
 
     try:
@@ -106,16 +111,16 @@
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
         try:
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
             raise Exception
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to update last activity time.", str(err))
         sys.exit(1)
 
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                 notebook_config['notebook_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['notebook_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(notebook_config['notebook_name']))
@@ -126,8 +131,8 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
 
 
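The start script above also shows the error-handling inversion applied throughout this change set: the inner handler is now a bare except that only prints the traceback and re-raises, while the outer handler records the failure exactly once via dlab.fab.append_result and exits non-zero. A minimal sketch, with run_step and append_result standing in for the real calls:

    import sys
    import traceback

    def append_result(error, details=''):
        # stand-in for dlab.fab.append_result
        print('{} {}'.format(error, details))

    def run_step():
        raise RuntimeError("step failed")

    try:
        try:
            run_step()
        except:
            # inner handler: keep the stack trace, defer the reporting
            traceback.print_exc()
            raise Exception
    except Exception as err:
        # outer handler: report once and signal failure with a non-zero exit
        append_result("Failed to start notebook.", str(err))
        sys.exit(1)

Note that the re-raised bare Exception means str(err) in the outer handler is empty; the original message survives only in the printed traceback.
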
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
index 4c4ba17..5e77666 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
@@ -24,9 +24,9 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
 import argparse
@@ -37,26 +37,26 @@
     print("Stopping data engine cluster")
     cluster_list = []
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "notebook_name" in vm.tags:
                 if notebook_name == vm.tags['notebook_name']:
                     if 'master' == vm.tags["Type"]:
                         cluster_list.append(vm.tags["Name"])
-                    AzureActions().stop_instance(resource_group_name, vm.name)
+                    AzureActions.stop_instance(resource_group_name, vm.name)
                     print("Instance {} has been stopped".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop clusters", str(err))
         sys.exit(1)
 
     print("Stopping notebook")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if notebook_name == vm.tags["Name"]:
-                    AzureActions().stop_instance(resource_group_name, vm.name)
+                    AzureActions.stop_instance(resource_group_name, vm.name)
                     print("Instance {} has been stopped".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop instance", str(err))
         sys.exit(1)
 
 
@@ -69,15 +69,17 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        notebook_config['exploratory_name'] = os.environ['exploratory_name']
+    else:
         notebook_config['exploratory_name'] = ''
-    try:
-        notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        notebook_config['computational_name'] = os.environ['computational_name']
+    else:
         notebook_config['computational_name'] = ''
     notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -87,18 +89,15 @@
     try:
         stop_notebook(notebook_config['resource_group_name'], notebook_config['notebook_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop notebook.", str(err))
         sys.exit(1)
 
-
     try:
         with open("/root/result.json", 'w') as result:
             res = {"notebook_name": notebook_config['notebook_name'],
                    "Action": "Stop notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
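
Optional environment variables such as exploratory_name and computational_name are now read with an explicit membership test instead of a try/except around the lookup, so only a genuinely missing key selects the fallback (the .replace('_', '-') sanitization was dropped at the same time). A sketch of both styles:

    import os

    # old style: any exception, not only a missing key, fell into the fallback
    try:
        exploratory_name = os.environ['exploratory_name'].replace('_', '-')
    except:
        exploratory_name = ''

    # new style: the fallback is taken only when the key is absent
    if 'exploratory_name' in os.environ:
        exploratory_name = os.environ['exploratory_name']
    else:
        exploratory_name = ''

os.environ.get('exploratory_name', '') would express the same fallback in a single call.
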
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
index e08130d..73eab17 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
@@ -24,34 +24,35 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
+import traceback
 
 
 def terminate_nb(resource_group_name, notebook_name):
     print("Terminating data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "notebook_name" in vm.tags:
                 if notebook_name == vm.tags['notebook_name']:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate clusters", str(err))
         sys.exit(1)
 
     print("Terminating notebook")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if notebook_name == vm.tags["Name"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instance", str(err))
         sys.exit(1)
 
 
@@ -63,15 +64,17 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        notebook_config['exploratory_name'] = os.environ['exploratory_name']
+    else:
         notebook_config['exploratory_name'] = ''
-    try:
-        notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        notebook_config['computational_name'] = os.environ['computational_name']
+    else:
         notebook_config['computational_name'] = ''
     notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -83,7 +86,7 @@
             terminate_nb(notebook_config['resource_group_name'], notebook_config['notebook_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate notebook.", str(err))
+            dlab.fab.append_result("Failed to terminate notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -94,6 +97,6 @@
                    "Action": "Terminate notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
index ff186ac..c74abfe 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
@@ -21,23 +21,26 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
 import json
+import os
 
 
 if __name__ == "__main__":
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         image_conf = dict()
         image_conf['service_base_name'] = os.environ['conf_service_base_name']
         image_conf['resource_group_name'] = os.environ['azure_resource_group_name']
         image_conf['full_image_name'] = os.environ['notebook_image_name']
 
-        image = AzureMeta().get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+        image = AzureMeta.get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
         if image != '':
-            AzureActions().remove_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+            AzureActions.remove_image(image_conf['resource_group_name'], image_conf['full_image_name'])
 
             with open("/root/result.json", 'w') as result:
                 res = {"notebook_image_name": image_conf['full_image_name'],
@@ -45,6 +48,5 @@
                        "Action": "Delete existing notebook image"}
                 result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to delete existing notebook image", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to delete existing notebook image", str(err))
+        sys.exit(1)
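
These scripts also switch from wildcard imports to qualified ones (dlab.fab.append_result instead of a bare append_result) and build AzureMeta/AzureActions once per process rather than instantiating them at every call site, which presumably avoids rebuilding the underlying Azure SDK clients each time. A self-contained sketch of the two styles, with a stand-in class in place of dlab.meta_lib.AzureMeta:

    class AzureMetaStandIn(object):
        # stand-in for dlab.meta_lib.AzureMeta; assume __init__ constructs SDK clients
        def __init__(self):
            print("building Azure SDK clients")

        def get_image(self, group, name):
            return '{}/{}'.format(group, name)

    # before: a fresh client per call, e.g. AzureMeta().get_image(...)
    image = AzureMetaStandIn().get_image('rg', 'img')

    # after: one module-level client, mirroring AzureMeta = dlab.meta_lib.AzureMeta()
    AzureMeta = AzureMetaStandIn()
    image = AzureMeta.get_image('rg', 'img')
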
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
index fea29cb..8d90b5e 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -37,7 +38,7 @@
 
 def configure_slave(slave_number, data_engine):
     slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
-    slave_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], slave_name)
+    slave_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'], slave_name)
     try:
         logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
         print('[CREATING DLAB SSH USER ON SLAVE NODE]')
@@ -51,18 +52,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to create ssh user on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON SLAVE]')
         logging.info('[INSTALLING USERs KEY ON SLAVE]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
             slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
@@ -72,13 +69,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'],
-                                       data_engine['master_node_name'])
-        append_result("Failed to install user ssh key on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install user ssh key on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -92,12 +84,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to clean slave instance..", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean slave instance..", str(err))
         sys.exit(1)
 
     try:
@@ -113,12 +101,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to configure proxy on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -133,13 +117,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to install prerequisites on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -157,16 +136,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed configuring slave node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to configure slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure slave node.", str(err))
         sys.exit(1)
 
 
+def clear_resources():
+    for i in range(data_engine['instance_count'] - 1):
+        slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+        AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
+    AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+
+
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                                os.environ['request_id'])
@@ -176,38 +157,41 @@
                         filename=local_log_filepath)
 
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         print('Generating infrastructure names and tags')
         data_engine = dict()
-        try:
-            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name']
+        else:
             data_engine['exploratory_name'] = ''
-        try:
-            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name']
+        else:
             data_engine['computational_name'] = ''
         data_engine['service_base_name'] = os.environ['conf_service_base_name']
         data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
         data_engine['region'] = os.environ['azure_region']
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['vpc_name'] = os.environ['azure_vpc_name']
-        data_engine['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        data_engine['endpoint_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['private_subnet_name'] = '{}-{}-subnet'.format(data_engine['service_base_name'],
-                                                                   data_engine['project_name'])
-        data_engine['private_subnet_cidr'] = AzureMeta().get_subnet(data_engine['resource_group_name'],
-                                                                    data_engine['vpc_name'],
-                                                                    data_engine['private_subnet_name']).address_prefix
-        data_engine['master_security_group_name'] = '{}-{}-dataengine-master-sg'.format(
-            data_engine['service_base_name'], data_engine['project_name'])
-        data_engine['slave_security_group_name'] = '{}-{}-dataengine-slave-sg'.format(data_engine['service_base_name'],
-                                                                                      data_engine['project_name'])
-        data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+        data_engine['user_name'] = os.environ['edge_user_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['project_tag'] = data_engine['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
+        data_engine['private_subnet_name'] = '{}-{}-{}-subnet'.format(data_engine['service_base_name'],
+                                                                      data_engine['project_name'],
+                                                                      data_engine['endpoint_name'])
+        data_engine['private_subnet_cidr'] = AzureMeta.get_subnet(data_engine['resource_group_name'],
+                                                                  data_engine['vpc_name'],
+                                                                  data_engine['private_subnet_name']).address_prefix
+        data_engine['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                               data_engine['project_name'],
-                                                              data_engine['exploratory_name'],
+                                                              data_engine['endpoint_name'],
                                                               data_engine['computational_name'])
         data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
         data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
@@ -217,19 +201,20 @@
         data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
         data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
         data_engine['notebook_name'] = os.environ['notebook_instance_name']
-        master_node_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                           data_engine['master_node_name'])
+        master_node_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                data_engine['master_node_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
                                                        data_engine['project_name'],
                                                        data_engine['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                          edge_instance_name)
+        data_engine['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                       data_engine['region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(data_engine['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = data_engine['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         key = RSA.importKey(open(keyfile_name, 'rb').read())
         data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
@@ -240,13 +225,8 @@
             initial_user = 'ec2-user'
             sudo_group = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
@@ -262,18 +242,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to create ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on master.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON MASTER]')
         logging.info('[INSTALLING USERs KEY ON MASTER]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
             master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
@@ -284,12 +260,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to install ssh user key on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install ssh user key on master.", str(err))
         sys.exit(1)
 
 
@@ -304,12 +276,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to clean master instance.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -325,12 +293,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to configure proxy on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on master.", str(err))
         sys.exit(1)
 
     try:
@@ -345,13 +309,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to install prerequisites on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
         sys.exit(1)
 
     try:
@@ -369,12 +328,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure master node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to configure master node", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -389,18 +344,15 @@
             if job.exitcode != 0:
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to configure slave nodes", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
-        notebook_instance_ip = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                  data_engine['notebook_name'])
+        notebook_instance_ip = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                data_engine['notebook_name'])
         additional_info = {
             "computational_name": data_engine['computational_name'],
             "master_node_hostname": master_node_hostname,
@@ -425,21 +377,18 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to configure reverse proxy", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        ip_address = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                        data_engine['master_node_name'])
+        ip_address = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                      data_engine['master_node_name'])
         spark_master_url = "http://" + ip_address + ":8080"
-        spark_master_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+        spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
@@ -456,13 +405,14 @@
                    "Action": "Create new Data Engine",
                    "computational_url": [
                        {"description": "Apache Spark Master",
-                        "url": spark_master_acces_url},
+                        "url": spark_master_access_url},
                        # {"description": "Apache Spark Master (via tunnel)",
                        # "url": spark_master_url}
                    ]
                    }
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
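
dataengine_configure.py collapses its many repeated slave-and-master teardown loops into the single clear_resources() helper shown above. The helper reads data_engine and AzureActions as module-level globals, so it is only safe once those are populated; a self-contained sketch with a stand-in for AzureActions.remove_instance and hypothetical names:

    def remove_instance(group, name):
        # stand-in for AzureActions.remove_instance
        print("removing {} from {}".format(name, group))

    data_engine = {
        'resource_group_name': 'rg',                 # hypothetical values
        'master_node_name': 'dlab-prj-ep-de-c1-m',
        'slave_node_name': 'dlab-prj-ep-de-c1-s',
        'instance_count': 3,
    }

    def clear_resources():
        # remove every slave first (instance_count includes the master), then the master
        for i in range(data_engine['instance_count'] - 1):
            slave_name = data_engine['slave_node_name'] + str(i + 1)
            remove_instance(data_engine['resource_group_name'], slave_name)
        remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])

    clear_resources()  # removes ...-s1, ...-s2, then ...-m
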
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
index 24855fa..86dc7a9 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -43,44 +44,48 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         data_engine = dict()
-        data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        data_engine['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
+        data_engine['user_name'] = os.environ['edge_user_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        data_engine['project_tag'] = data_engine['project_name']
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
         print('Generating infrastructure names and tags')
-        try:
-            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name']
+        else:
             data_engine['exploratory_name'] = ''
-        try:
-            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name']
+        else:
             data_engine['computational_name'] = ''
         data_engine['service_base_name'] = os.environ['conf_service_base_name']
         data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
         data_engine['region'] = os.environ['azure_region']
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['vpc_name'] = os.environ['azure_vpc_name']
-        data_engine['private_subnet_name'] = '{}-{}-subnet'.format(data_engine['service_base_name'],
-                                                                   data_engine['project_name'])
-        data_engine['private_subnet_cidr'] = AzureMeta().get_subnet(data_engine['resource_group_name'],
-                                                                    data_engine['vpc_name'],
-                                                                    data_engine['private_subnet_name']).address_prefix
-        data_engine['master_security_group_name'] = '{}-{}-dataengine-master-sg'.format(data_engine['service_base_name'],
-                                                                                        data_engine['project_name'])
-        data_engine['slave_security_group_name'] = '{}-{}-dataengine-slave-sg'.format(data_engine['service_base_name'],
-                                                                                      data_engine['project_name'])
-        data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+        data_engine['private_subnet_name'] = '{}-{}-{}-subnet'.format(data_engine['service_base_name'],
+                                                                      data_engine['project_name'],
+                                                                      data_engine['endpoint_name'])
+        data_engine['private_subnet_cidr'] = AzureMeta.get_subnet(data_engine['resource_group_name'],
+                                                                  data_engine['vpc_name'],
+                                                                  data_engine['private_subnet_name']).address_prefix
+        data_engine['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                               data_engine['project_name'],
-                                                              data_engine['exploratory_name'],
+                                                              data_engine['endpoint_name'],
                                                               data_engine['computational_name'])
         data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
         data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
         data_engine['master_network_interface_name'] = '{}-nif'.format(data_engine['master_node_name'])
         data_engine['master_size'] = os.environ['azure_dataengine_master_size']
-        key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']), 'rb').read())
+        key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'],
+                                                   os.environ['conf_key_name']), 'rb').read())
         data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
         data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
@@ -106,20 +111,19 @@
         data_engine['image_type'] = 'default'
 
         if os.environ['conf_shared_image_enabled'] == 'false':
-            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
+            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'],
+                os.environ['application'])
         else:
             data_engine['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                     os.environ['endpoint_name'],
+                                                                                     data_engine['endpoint_name'],
                                                                                      os.environ['application'])
 
         data_engine['notebook_image_name'] = (lambda x: os.environ['notebook_image_name'] if x != 'None'
                     else data_engine['expected_image_name'])(str(os.environ.get('notebook_image_name')))
 
         print('Searching pre-configured images')
-        if AzureMeta().get_image(data_engine['resource_group_name'], data_engine['notebook_image_name']) and \
+        if AzureMeta.get_image(data_engine['resource_group_name'], data_engine['notebook_image_name']) and \
                         os.environ['application'] in os.environ['dataengine_image_notebooks'].split(','):
             data_engine['image_name'] = data_engine['notebook_image_name']
             data_engine['image_type'] = 'pre-configured'
@@ -128,26 +132,25 @@
             data_engine['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
             print('No pre-configured image found. Using default one: {}'.format(data_engine['image_name']))
     except Exception as err:
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary. Exception:" + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
         sys.exit(1)
 
     try:
-        edge_status = AzureMeta().get_instance_status(data_engine['resource_group_name'], '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
-                                                                                data_engine['project_name'],
-                                                                                data_engine['endpoint_name']))
+        edge_status = AzureMeta.get_instance_status(data_engine['resource_group_name'],
+                                                    '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
+                                                                              data_engine['project_name'],
+                                                                              data_engine['endpoint_name']))
         if edge_status != 'running':
             logging.info('ERROR: Edge node is unavailable! Aborting...')
             print('ERROR: Edge node is unavailable! Aborting...')
-            ssn_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                              os.environ['conf_service_base_name'] + '-ssn')
-            put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                                ssn_hostname)
-            append_result("Edge node is unavailable")
+            ssn_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                            data_engine['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
             sys.exit(1)
     except Exception as err:
-        print("Failed to verify edge status.")
-        append_result("Failed to verify edge status.", str(err))
+        dlab.fab.append_result("Failed to verify edge status.", str(err))
         sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
@@ -182,12 +185,11 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+            AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
         except:
             print("The instance hasn't been created.")
-        append_result("Failed to create master instance.", str(err))
+        dlab.fab.append_result("Failed to create master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -217,13 +219,12 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         for i in range(data_engine['instance_count'] - 1):
             slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
             try:
-                AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
+                AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
             except:
                 print("The slave instance {} hasn't been created.".format(slave_name))
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to create slave instances.", str(err))
+        AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to create slave instances.", str(err))
         sys.exit(1)
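Both dataengine scripts also rename their resources: the endpoint name is now part of every name, the exploratory name is dropped from the cluster name, and the security-group names shorten 'dataengine' to 'de'. A cluster therefore becomes {service_base_name}-{project}-{endpoint}-de-{computational} instead of {service_base_name}-{project}-de-{exploratory}-{computational}. A sketch with hypothetical values:

    service_base_name = 'dlab'       # hypothetical values for illustration
    project_name = 'prj1'
    endpoint_name = 'ep1'
    computational_name = 'spark1'

    cluster_name = '{}-{}-{}-de-{}'.format(service_base_name, project_name,
                                           endpoint_name, computational_name)
    master_node_name = '{}-m'.format(cluster_name)  # dlab-prj1-ep1-de-spark1-m
    slave_node_name = '{}-s'.format(cluster_name)   # dlab-prj1-ep1-de-spark1-s, then 1, 2, ...
    subnet_name = '{}-{}-{}-subnet'.format(service_base_name, project_name, endpoint_name)
    print(cluster_name)
    print(subnet_name)                               # dlab-prj1-ep1-subnet
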
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
index cf2a613..308912f 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
@@ -24,23 +24,25 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
+from fabric.api import *
 
 
 def start_data_engine(resource_group_name, cluster_name):
     print("Starting data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if cluster_name == vm.tags["Name"]:
-                    AzureActions().start_instance(resource_group_name, vm.name)
+                    AzureActions.start_instance(resource_group_name, vm.name)
                     print("Instance {} has been started".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to start dataengine", str(err))
         sys.exit(1)
 
 
@@ -52,23 +54,26 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name']
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name']
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
+    data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                           data_engine['project_name'],
-                                                          data_engine['exploratory_name'],
+                                                          data_engine['endpoint_name'],
                                                           data_engine['computational_name'])
     try:
         logging.info('[STARTING DATA ENGINE]')
@@ -86,8 +91,10 @@
         logging.info('[UPDATE LAST ACTIVITY TIME]')
         print('[UPDATE LAST ACTIVITY TIME]')
         data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
-        data_engine['notebook_ip'] = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], os.environ['notebook_instance_name'])
-        data_engine['computational_ip'] = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], data_engine['computational_id'])
+        data_engine['notebook_ip'] = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                      os.environ['notebook_instance_name'])
+        data_engine['computational_ip'] = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                           data_engine['computational_id'])
         data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
             .format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -96,18 +103,17 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
 
-
     try:
         with open("/root/result.json", 'w') as result:
             res = {"service_base_name": data_engine['service_base_name'],
                    "Action": "Start Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
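
The final result-writing block in every script used to exit 0 even when /root/result.json could not be written, letting a failed run report success; it now records the error and exits 1. The new shape, with append_result standing in for dlab.fab.append_result and the res payload varying per script:

    import json
    import sys

    def append_result(error, details=''):
        # stand-in for dlab.fab.append_result
        print('{} {}'.format(error, details))

    res = {"service_base_name": "dlab", "Action": "Start Data Engine"}  # per-script payload

    try:
        with open("/root/result.json", 'w') as result:
            print(json.dumps(res))
            result.write(json.dumps(res))
    except Exception as err:
        # previously: print("Failed writing results."); sys.exit(0)
        append_result("Failed to write results", str(err))
        sys.exit(1)
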
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
index ef1521f..963c555 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
@@ -24,23 +24,24 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
 
 
 def stop_data_engine(resource_group_name, cluster_name):
     print("Stopping data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if cluster_name == vm.tags["Name"]:
-                    AzureActions().stop_instance(resource_group_name, vm.name)
+                    AzureActions.stop_instance(resource_group_name, vm.name)
                     print("Instance {} has been stopped".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop dataengine", str(err))
         sys.exit(1)
 
 
@@ -52,23 +53,26 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name']
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name']
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
     data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
-    data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+    data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                           data_engine['project_name'],
-                                                          data_engine['exploratory_name'],
+                                                          data_engine['endpoint_name'],
                                                           data_engine['computational_name'])
     try:
         logging.info('[STOPPING DATA ENGINE]')
@@ -77,7 +81,7 @@
             stop_data_engine(data_engine['resource_group_name'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to stop Data Engine.", str(err))
+            dlab.fab.append_result("Failed to stop Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -88,6 +92,6 @@
                    "Action": "Stop Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
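
The stop and terminate scripts share the same selection logic: walk every VM
in the resource group and act only on those whose "Name" tag equals the
cluster name. A self-contained sketch of that filter, using a hypothetical
stand-in for the Azure SDK VM objects:

    from dataclasses import dataclass, field

    @dataclass
    class StubVM:                    # hypothetical stand-in for the SDK type
        name: str
        tags: dict = field(default_factory=dict)

    def select_cluster_vms(vms, cluster_name):
        # Mirrors the loops above: untagged VMs are skipped, tagged VMs are
        # matched on the "Name" tag.
        return [vm for vm in vms if vm.tags.get("Name") == cluster_name]

    vms = [StubVM('sbn-prj-end-de-cl1-m', {"Name": 'sbn-prj-end-de-cl1'}),
           StubVM('sbn-prj-end-nb-jup', {"Name": 'sbn-prj-end-nb-jup'})]
    print([vm.name for vm in select_cluster_vms(vms, 'sbn-prj-end-de-cl1')])
    # -> ['sbn-prj-end-de-cl1-m']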
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
index 974fc3e..1363eb8 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
@@ -24,30 +24,31 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
 
 
 def terminate_data_engine(resource_group_name, notebook_name, os_user, key_path, cluster_name):
     print("Terminating data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if cluster_name == vm.tags["Name"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine", str(err))
         sys.exit(1)
 
     print("Removing Data Engine kernels from notebook")
     try:
-        AzureActions().remove_dataengine_kernels(resource_group_name, notebook_name, os_user, key_path, cluster_name)
+        AzureActions.remove_dataengine_kernels(resource_group_name, notebook_name, os_user, key_path, cluster_name)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove dataengine kernels from notebook", str(err))
         sys.exit(1)
 
 
@@ -59,23 +60,26 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name']
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name']
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
+    data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                           data_engine['project_name'],
-                                                          data_engine['exploratory_name'],
+                                                          data_engine['endpoint_name'],
                                                           data_engine['computational_name'])
     data_engine['notebook_name'] = os.environ['notebook_instance_name']
     data_engine['key_path'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
@@ -89,7 +93,7 @@
                                   os.environ['conf_os_user'], data_engine['key_path'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Data Engine.", str(err))
+            dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -100,6 +104,6 @@
                    "Action": "Terminate Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
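
A refactor that repeats across these files: AzureMeta/AzureActions used to be
constructed on every call (AzureMeta().get_...), and are now instantiated once
in __main__ and reused. A sketch of the construct-once pattern with a
hypothetical client class:

    class MetaClient:                  # hypothetical stand-in for AzureMeta
        def __init__(self):
            # The real class builds credentials and SDK clients here, which
            # is comparatively expensive to repeat on every lookup.
            self.constructed = True

        def get_private_ip_address(self, resource_group, instance_name):
            return '10.0.0.4'          # placeholder value

    AzureMeta = MetaClient()           # one shared instance, as in the scripts
    ip_a = AzureMeta.get_private_ip_address('rg', 'vm-a')
    ip_b = AzureMeta.get_private_ip_address('rg', 'vm-b')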
diff --git a/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
index 9f99221..9c8f24c 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,33 +41,36 @@
                         filename=local_log_filepath)
 
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
-        notebook_config['instance_name'] = '{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'],
-                                                                notebook_config['exploratory_name'])
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "project_tag": notebook_config['project_tag'],
@@ -77,15 +82,16 @@
                 notebook_config['service_base_name'],
                 notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "endpoint_tag": notebook_config['endpoint_tag'],
                                              "Exploratory": notebook_config['exploratory_name'],
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -94,45 +100,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
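+        # With public networking the edge is addressed by its Azure public DNS
+        # name (host-<instance>.<region>.cloudapp.azure.com), not its public IP.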
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                           edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -140,9 +146,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -151,16 +156,16 @@
         print('[CONFIGURE PROXY ON DEEP LEARNING INSTANCE]')
         additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
         params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -173,12 +178,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -194,9 +198,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -206,7 +209,7 @@
                  "--os_user {2} --jupyter_version {3} " \
                  "--scala_version {4} --spark_version {5} " \
                  "--hadoop_version {6} --region {7} " \
-                 "--r_mirror {8} --ip_adress {9} --exploratory_name {10} --edge_ip {11}" \
+                 "--r_mirror {8} --ip_address {9} --exploratory_name {10} --edge_ip {11}" \
                  .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
                          os.environ['notebook_jupyter_version'], os.environ['notebook_scala_version'],
                          os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
@@ -214,15 +217,14 @@
                          notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_deep_learning_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Deep Learning node.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -233,12 +235,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -254,44 +255,44 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -316,25 +317,24 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         tensorboard_ip_url = 'http://' + ip_address + ':6006'
         jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-        jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             notebook_config['exploratory_name'])
-        jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
             notebook_config['exploratory_name'])
-        tensorboard_acces_url = "http://" + edge_instance_hostname + "/{}-tensor/".format(
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
             notebook_config['exploratory_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
@@ -359,11 +359,11 @@
                    "Action": "Create new notebook server",
                    "exploratory_url": [
                    {"description": "Jupyter",
-                    "url": jupyter_notebook_acces_url},
+                    "url": jupyter_notebook_access_url},
                    {"description": "TensorBoard",
-                    "url": tensorboard_acces_url},
+                    "url": tensorboard_access_url},
                    {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url}#,
+                    "url": jupyter_ungit_access_url}#,
                    #{"description": "Jupyter (via tunnel)",
                    # "url": jupyter_ip_url},
                    #{"description": "TensorBoard (via tunnel)",
@@ -373,7 +373,6 @@
                ]}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate output information.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
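
The summary section above also moves the user-facing links from http on the
edge public IP to https on the edge DNS name, while keeping the per-exploratory
path layout. A sketch of the resulting URLs (hostname and exploratory name are
made up for illustration):

    edge_host = 'host-sbn-prj-end-edge.westus2.cloudapp.azure.com'
    exploratory = 'dl1'

    jupyter_url = 'https://{}/{}/'.format(edge_host, exploratory)
    tensorboard_url = 'https://{}/{}-tensor/'.format(edge_host, exploratory)
    ungit_url = 'https://{}/{}-ungit/'.format(edge_host, exploratory)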
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py b/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
index 6307fcd..997d38f 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
@@ -22,10 +22,16 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
+import uuid
+from fabric.api import *
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -34,101 +40,121 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
+
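+    # Rollback helper: the except-blocks below previously repeated this whole
+    # teardown sequence inline; each failure path now calls clear_resources().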
+    def clear_resources():
+        AzureActions.remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+        AzureActions.remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
+                                   edge_conf['private_subnet_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'],
+                                           edge_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'],
+                                           edge_conf['master_security_group_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'],
+                                           edge_conf['slave_security_group_name'])
+        for storage_account in AzureMeta.list_storage_accounts(edge_conf['resource_group_name']):
+            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
+                AzureActions.remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
+        if os.environ['azure_datalake_enable'] == 'true':
+            for datalake in AzureMeta.list_datalakes(edge_conf['resource_group_name']):
+                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
+                    AzureActions.remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+
     try:
         print('Generating infrastructure names and tags')
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         edge_conf = dict()
-
-        edge_conf['service_base_name'] = os.environ['conf_service_base_name']
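+        # Cap the service base name at 20 characters so every resource name
+        # derived from it stays within Azure naming length limits.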
+        edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
         edge_conf['key_name'] = os.environ['conf_key_name']
         edge_conf['vpc_name'] = os.environ['azure_vpc_name']
         edge_conf['region'] = os.environ['azure_region']
         edge_conf['subnet_name'] = os.environ['azure_subnet_name']
-        edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        edge_conf['user_keyname'] = os.environ['project_name']
-        edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-subnet'
+        edge_conf['project_name'] = os.environ['project_name']
+        edge_conf['endpoint_name'] = os.environ['endpoint_name']
+        edge_conf['user_keyname'] = edge_conf['project_name']
+        edge_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(edge_conf['service_base_name'],
+                                                                    edge_conf['project_name'],
+                                                                    edge_conf['endpoint_name'])
         edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                                edge_conf['project_name'], edge_conf['endpoint_name'])
-        edge_conf['network_interface_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + \
-                                              '-edge-nif'
-        edge_conf['static_public_ip_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + \
-                                             '-edge-ip'
-        edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0'
-        edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + edge_conf['region'] + \
-                                         '.cloudapp.azure.com'
-        edge_conf['user_storage_account_name'] = '{0}-{1}-{2}-storage'.format(edge_conf['service_base_name'],
-                                                                              edge_conf['project_name'],
-                                                                              edge_conf['endpoint_name'])
-        edge_conf['user_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['project_name'] +
-                                            '-container').lower()
-        edge_conf['shared_storage_account_name'] = '{0}-{1}-shared-storage'.format(edge_conf['service_base_name'],
-                                                                                   edge_conf['endpoint_name'])
-        edge_conf['shared_container_name'] = (edge_conf['service_base_name'] + '-shared-container').lower()
-        edge_conf['datalake_store_name'] = edge_conf['service_base_name'] + '-ssn-datalake'
-        edge_conf['datalake_shared_directory_name'] = edge_conf['service_base_name'] + '-shared-folder'
-        edge_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(edge_conf['service_base_name'],
-                                                                            edge_conf['project_name'])
-        edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg'
-        edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + \
-                                                    '-nb-sg'
-        edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                    + edge_conf['project_name'] + '-dataengine-master-sg'
-        edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                   + edge_conf['project_name'] + '-dataengine-slave-sg'
+        edge_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_conf['instance_name'],
+                                                                                edge_conf['region'])
+        edge_conf['user_storage_account_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                                             edge_conf['project_name'],
+                                                                             edge_conf['endpoint_name']).lower()
+        edge_conf['user_container_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                                       edge_conf['project_name'],
+                                                                       edge_conf['endpoint_name']).lower()
+        edge_conf['shared_storage_account_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                                  edge_conf['endpoint_name']).lower()
+        edge_conf['shared_container_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                            edge_conf['endpoint_name']).lower()
+        edge_conf['datalake_store_name'] = '{}-ssn-datalake'.format(edge_conf['service_base_name'])
+        edge_conf['datalake_shared_directory_name'] = '{}-shared-folder'.format(edge_conf['service_base_name'])
+        edge_conf['datalake_user_directory_name'] = '{0}-{1}-{2}-folder'.format(edge_conf['service_base_name'],
+                                                                                edge_conf['project_name'],
+                                                                                edge_conf['endpoint_name'])
+        edge_conf['edge_security_group_name'] = '{}-sg'.format(edge_conf['instance_name'])
+        edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
+                                                                            edge_conf['project_name'],
+                                                                            edge_conf['endpoint_name'])
+        edge_conf['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(edge_conf['service_base_name'],
+                                                                                 edge_conf['project_name'],
+                                                                                 edge_conf['endpoint_name'])
+        edge_conf['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(edge_conf['service_base_name'],
+                                                                               edge_conf['project_name'],
+                                                                               edge_conf['endpoint_name'])
         edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
-        edge_conf['private_subnet_cidr'] = AzureMeta().get_subnet(edge_conf['resource_group_name'],
-                                                                  edge_conf['vpc_name'],
-                                                                  edge_conf['private_subnet_name']).address_prefix
+        edge_conf['keyfile_name'] = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
+        edge_conf['private_subnet_cidr'] = AzureMeta.get_subnet(edge_conf['resource_group_name'],
+                                                                edge_conf['vpc_name'],
+                                                                edge_conf['private_subnet_name']).address_prefix
         if os.environ['conf_network_type'] == 'private':
-            edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
-                                                                              edge_conf['instance_name'])
-            edge_conf['edge_public_ip'] =  edge_conf['edge_private_ip']
+            edge_conf['edge_private_ip'] = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+                                                                            edge_conf['instance_name'])
+            edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
+            edge_conf['instance_hostname'] = edge_conf['edge_private_ip']
         else:
-            edge_conf['edge_public_ip'] = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
-                                                                        edge_conf['instance_name'])
-            edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
-                                                                                    edge_conf['instance_name'])
-        instance_hostname = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
-                                                                        edge_conf['instance_name'])
-        edge_conf['vpc_cidrs'] = AzureMeta().get_vpc(edge_conf['resource_group_name'],
-                                                      edge_conf['vpc_name']).address_space.address_prefixes
+            edge_conf['edge_public_ip'] = AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+                                                                                   edge_conf['instance_name'])
+            edge_conf['edge_private_ip'] = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+                                                                            edge_conf['instance_name'])
+            edge_conf['instance_hostname'] = edge_conf['instance_dns_name']
+        edge_conf['vpc_cidrs'] = AzureMeta.get_vpc(edge_conf['resource_group_name'],
+                                                   edge_conf['vpc_name']).address_space.address_prefixes
+
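+        # Build the SAN list for step-ca issued certificates: always the edge
+        # private IP, plus the public IP and DNS name on public networks.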
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            edge_conf['step_cert_sans'] = ' --san {0} '.format(AzureMeta.get_private_ip_address(
+                edge_conf['resource_group_name'], edge_conf['instance_name']))
+            if os.environ['conf_network_type'] == 'public':
+                edge_conf['step_cert_sans'] += ' --san {0} --san {1} '.format(
+                    AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+                                                             edge_conf['instance_name']),
+                    edge_conf['instance_dns_name'])
+        else:
+            edge_conf['step_cert_sans'] = ''
+
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate infrastructure names", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            edge_conf['initial_user'] = 'ubuntu'
+            edge_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            edge_conf['initial_user'] = 'ec2-user'
+            edge_conf['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             edge_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            edge_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -136,57 +162,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING PREREQUISITES]')
         logging.info('[INSTALLING PREREQUISITES]')
-        params = "--hostname {} --keyfile {} --user {} --region {}".\
-            format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['azure_region'])
+        params = "--hostname {} --keyfile {} --user {} --region {}".format(
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+            os.environ['azure_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -194,40 +187,24 @@
         logging.info('[INSTALLING HTTP PROXY]')
         additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
                              "template_file": "/root/templates/squid.conf",
-                             "project_name": os.environ['project_name'],
+                             "project_name": edge_conf['project_name'],
                              "ldap_host": os.environ['ldap_hostname'],
                              "ldap_dn": os.environ['ldap_dn'],
                              "ldap_user": os.environ['ldap_service_username'],
                              "ldap_password": os.environ['ldap_service_password'],
                              "vpc_cidrs": edge_conf['vpc_cidrs'],
                              "allowed_ip_cidr": ['0.0.0.0/0']}
-        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
-                 .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('configure_http_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing http proxy.", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing http proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
 
@@ -237,72 +214,57 @@
         additional_config = {"user_keyname": edge_conf['user_keyname'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key. Excpeption: " + str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing users key. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING NGINX REVERSE PROXY]')
         logging.info('[INSTALLING NGINX REVERSE PROXY]')
-        params = "--hostname {} --keyfile {} --user {}" \
-            .format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'])
+        edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
+        params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
+                 "--step_cert_sans '{}'".format(
+                  edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+                  edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-' + edge_conf['endpoint_name'],
+                  edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
+
         try:
             local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
+        keycloak_params = "--service_base_name {} --keycloak_auth_server_url {} --keycloak_realm_name {} " \
+                          "--keycloak_user {} --keycloak_user_password {} --keycloak_client_secret {} " \
+                          "--edge_public_ip {} --project_name {} --endpoint_name {} ".format(
+                           edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
+                           os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
+                           os.environ['keycloak_user_password'],
+                           edge_conf['keycloak_client_secret'], edge_conf['instance_hostname'], edge_conf['project_name'],
+                           edge_conf['endpoint_name'])
+        try:
+            local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
+        except:
+            traceback.print_exc()
+            raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing Nginx reverse proxy. Excpeption: " + str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing Nginx reverse proxy. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
+        for storage_account in AzureMeta.list_storage_accounts(edge_conf['resource_group_name']):
             if edge_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                shared_storage_account_name = storage_account.name
+                edge_conf['shared_storage_account_name'] = storage_account.name
             if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                user_storage_account_name = storage_account.name
+                edge_conf['user_storage_account_name'] = storage_account.name
 
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
@@ -311,13 +273,13 @@
         print("Public IP: {}".format(edge_conf['edge_public_ip']))
         print("Private IP: {}".format(edge_conf['edge_private_ip']))
         print("Key name: {}".format(edge_conf['key_name']))
-        print("User storage account name: {}".format(user_storage_account_name))
+        print("User storage account name: {}".format(edge_conf['user_storage_account_name']))
         print("User container name: {}".format(edge_conf['user_container_name']))
         if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
+            for datalake in AzureMeta.list_datalakes(edge_conf['resource_group_name']):
                 if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    datalake_id = datalake.name
-            print("Data Lake name: {}".format(datalake_id))
+                    edge_conf['datalake_id'] = datalake.name
+            print("Data Lake name: {}".format(edge_conf['datalake_id']))
             print("Data Lake tag name: {}".format(edge_conf['datalake_store_name']))
             print("Data Lake Store user directory name: {}".format(edge_conf['datalake_user_directory_name']))
         print("Notebook SG: {}".format(edge_conf['notebook_security_group_name']))
@@ -329,9 +291,9 @@
                        "public_ip": edge_conf['edge_public_ip'],
                        "ip": edge_conf['edge_private_ip'],
                        "key_name": edge_conf['key_name'],
-                       "user_storage_account_name": user_storage_account_name,
+                       "user_storage_account_name": edge_conf['user_storage_account_name'],
                        "user_container_name": edge_conf['user_container_name'],
-                       "shared_storage_account_name": shared_storage_account_name,
+                       "shared_storage_account_name": edge_conf['shared_storage_account_name'],
                        "shared_container_name": edge_conf['shared_container_name'],
                        "user_storage_account_tag_name": edge_conf['user_storage_account_name'],
                        "tunnel_port": "22",
@@ -341,7 +303,7 @@
                        "notebook_subnet": edge_conf['private_subnet_cidr'],
                        "instance_id": edge_conf['instance_name'],
                        "full_edge_conf": edge_conf,
-                       "project_name": os.environ['project_name'],
+                       "project_name": edge_conf['project_name'],
                        "@class": "com.epam.dlab.dto.azure.edge.EdgeInfoAzure",
                        "Action": "Create new EDGE server"}
             else:
@@ -349,12 +311,12 @@
                        "public_ip": edge_conf['edge_public_ip'],
                        "ip": edge_conf['edge_private_ip'],
                        "key_name": edge_conf['key_name'],
-                       "user_storage_account_name": user_storage_account_name,
+                       "user_storage_account_name": edge_conf['user_storage_account_name'],
                        "user_container_name": edge_conf['user_container_name'],
-                       "shared_storage_account_name": shared_storage_account_name,
+                       "shared_storage_account_name": edge_conf['shared_storage_account_name'],
                        "shared_container_name": edge_conf['shared_container_name'],
                        "user_storage_account_tag_name": edge_conf['user_storage_account_name'],
-                       "datalake_name": datalake_id,
+                       "datalake_name": edge_conf['datalake_id'],
                        "datalake_tag_name": edge_conf['datalake_store_name'],
                        "datalake_shared_directory_name": edge_conf['datalake_shared_directory_name'],
                        "datalake_user_directory_name": edge_conf['datalake_user_directory_name'],
@@ -365,11 +327,12 @@
                        "notebook_subnet": edge_conf['private_subnet_cidr'],
                        "instance_id": edge_conf['instance_name'],
                        "full_edge_conf": edge_conf,
-                       "project_name": os.environ['project_name'],
+                       "project_name": edge_conf['project_name'],
                        "@class": "com.epam.dlab.dto.azure.edge.EdgeInfoAzure",
                        "Action": "Create new EDGE server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
+        sys.exit(1)
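
The hunks above collapse four copies of the same inline cleanup into a single clear_resources() call. The helper's definition sits earlier in edge_configure.py, outside this diff; a minimal sketch reconstructed from the removed lines (assuming the module-level AzureMeta/AzureActions singletons and the script's edge_conf dict) could look like this:

    import os

    def clear_resources():
        # Reconstructed from the removed inline cleanup blocks; a sketch, not
        # the verbatim helper. Tears down everything edge provisioning created.
        AzureActions.remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
        AzureActions.remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
                                   edge_conf['private_subnet_name'])
        for sg in ('edge_security_group_name', 'notebook_security_group_name',
                   'master_security_group_name', 'slave_security_group_name'):
            AzureActions.remove_security_group(edge_conf['resource_group_name'], edge_conf[sg])
        for storage_account in AzureMeta.list_storage_accounts(edge_conf['resource_group_name']):
            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
                AzureActions.remove_storage_account(edge_conf['resource_group_name'],
                                                    storage_account.name)
        if os.environ['azure_datalake_enable'] == 'true':
            for datalake in AzureMeta.list_datalakes(edge_conf['resource_group_name']):
                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
                    AzureActions.remove_datalake_directory(
                        datalake.name, edge_conf['datalake_user_directory_name'])
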
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
index 0f00ce2..9dc1b01 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
@@ -57,20 +57,20 @@
             edge_conf['static_public_ip_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge-ip'
         edge_conf['region'] = os.environ['azure_region']
         edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-        edge_conf['private_subnet_prefix'] = os.environ['azure_private_subnet_prefix']
+        edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
         edge_conf['instance_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge'
-        edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0'
+        edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-volume-primary'
         edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg'
-        edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name']\
+        edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + "-" + os.environ['endpoint_name']\
             + '-nb-sg'
         edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                    + edge_conf['user_name'] + '-dataengine-master-sg'
+                                                    + edge_conf['user_name'] + '-de-master-sg'
         edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                   + edge_conf['user_name'] + '-dataengine-slave-sg'
+                                                   + edge_conf['user_name'] + '-de-slave-sg'
         edge_conf['edge_storage_account_name'] = ('{0}-{1}-{2}-storage'.format(edge_conf['service_base_name'],
                                                                                edge_conf['user_name'],
                                                                                edge_conf['endpoint_name']))
-        edge_conf['edge_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['user_name'] +
+        edge_conf['edge_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['user_name'] + '-' + edge_conf['endpoint_name'] +
                                             '-container').lower()
         edge_conf['datalake_store_name'] = edge_conf['service_base_name'] + '-ssn-datalake'
         edge_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(edge_conf['service_base_name'],
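
The renames above fold the endpoint name into the notebook security group and container names and abbreviate "dataengine" to "de" in the cluster SG names. For illustration only — the sample values below are assumptions, the real ones come from os.environ — the resulting names are:

    # Hypothetical sample inputs for illustration.
    service_base_name, user_name, endpoint_name = 'dlab', 'user1', 'local'

    notebook_sg = '{}-{}-{}-nb-sg'.format(service_base_name, user_name, endpoint_name)
    # -> 'dlab-user1-local-nb-sg'
    master_sg = '{}-{}-de-master-sg'.format(service_base_name, user_name)
    # -> 'dlab-user1-de-master-sg'
    slave_sg = '{}-{}-de-slave-sg'.format(service_base_name, user_name)
    # -> 'dlab-user1-de-slave-sg'
    container = '{}-{}-{}-container'.format(service_base_name, user_name, endpoint_name).lower()
    # -> 'dlab-user1-local-container'
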
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_start.py b/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
index 445f48d..04e57ae 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import logging
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,29 +39,31 @@
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     edge_conf = dict()
     edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+    edge_conf['project_name'] = os.environ['project_name']
+    edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + os.environ['azure_region'] + '.cloudapp.azure.com'
+    edge_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_conf['instance_name'],
+                                                                            os.environ['azure_region'])
 
     logging.info('[START EDGE]')
     print('[START EDGE]')
     try:
-        AzureActions().start_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+        AzureActions.start_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start edge.", str(err))
+        dlab.fab.append_result("Failed to start edge.", str(err))
         sys.exit(1)
 
     try:
-        public_ip_address = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
-                                                                       edge_conf['instance_name'])
-        private_ip_address = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
-                                                                         edge_conf['instance_name'])
+        public_ip_address = AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+                                                                     edge_conf['instance_name'])
+        private_ip_address = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+                                                              edge_conf['instance_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
@@ -72,7 +78,7 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
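
Two changes recur through the start/stop/status/terminate scripts below: the AzureMeta/AzureActions wrappers are instantiated once per script instead of on every call, and the edge DNS name is built with a single format() following Azure's regional cloudapp convention. In isolation (the sample name and region are assumptions):

    import dlab.meta_lib
    import dlab.actions_lib

    # One shared, authenticated client per script, reused by every call,
    # instead of constructing AzureMeta()/AzureActions() at each call site.
    AzureMeta = dlab.meta_lib.AzureMeta()
    AzureActions = dlab.actions_lib.AzureActions()

    # Azure public DNS label convention used for the edge node:
    #   host-<instance_name>.<region>.cloudapp.azure.com
    instance_name, region = 'dlab-project1-local-edge', 'westus2'  # hypothetical values
    instance_dns_name = 'host-{}.{}.cloudapp.azure.com'.format(instance_name, region)
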
 
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_status.py b/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
index 8c16d12..1b3fd15 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
@@ -23,14 +23,20 @@
 
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+from fabric.api import *
+import traceback
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
@@ -44,13 +50,12 @@
         logging.info('[COLLECT DATA]')
         print('[COLLECTING DATA]')
         params = '--resource_group_name {} --list_resources "{}"'.format(edge_conf['resource_group_name'],
-                                                                      os.environ['edge_list_resources'])
+                                                                         os.environ['edge_list_resources'])
         try:
             local("~/scripts/{}.py {}".format('common_collect_data', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to collect necessary information.", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to collect necessary information.", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py b/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
index 1bb319b..dfc4cba 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
@@ -20,10 +20,13 @@
 # under the License.
 #
 # ******************************************************************************
-
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import os
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,21 +38,22 @@
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     edge_conf = dict()
     edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+    edge_conf['project_name'] = os.environ['project_name']
+    edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
 
     logging.info('[STOP EDGE]')
     print('[STOP EDGE]')
     try:
-        AzureActions().stop_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+        AzureActions.stop_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop edge.", str(err))
+        dlab.fab.append_result("Failed to stop edge.", str(err))
         sys.exit(1)
 
     try:
@@ -58,7 +62,7 @@
                    "Action": "Stop edge server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
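
Note the recurring exit-code fix in the result-writing blocks: a failed write used to exit 0, so the provisioning engine treated the run as successful even though no result was produced; every script now records the error and exits 1. Distilled to a sketch (the result-file path is an assumption for illustration):

    import json
    import sys
    import dlab.fab  # project module providing append_result()

    def write_result(res, path='/tmp/result.json'):  # path is hypothetical
        try:
            with open(path, 'w') as result:
                result.write(json.dumps(res))
        except Exception as err:
            dlab.fab.append_result("Failed to write results.", str(err))
            sys.exit(1)  # previously sys.exit(0), which masked the failure
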
 
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
index d785f23..a61c75d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
@@ -22,128 +22,137 @@
 # ******************************************************************************
 
 import json
-import sys, time, os
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
 
 
 def terminate_edge_node(resource_group_name, service_base_name, project_tag, subnet_name, vpc_name):
     print("Terminating EDGE, notebook and dataengine virtual machines")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             try:
                 if project_tag == vm.tags["project_tag"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instance", str(err))
         sys.exit(1)
 
     print("Removing network interfaces")
     try:
-        for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+        for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
             try:
                 if project_tag == network_interface.tags["project_tag"]:
-                    AzureActions().delete_network_if(resource_group_name, network_interface.name)
+                    AzureActions.delete_network_if(resource_group_name, network_interface.name)
                     print("Network interface {} has been removed".format(network_interface.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove network interfaces", str(err))
         sys.exit(1)
 
     print("Removing static public IPs")
     try:
-        for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+        for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
             try:
                 if project_tag in static_public_ip.tags["project_tag"]:
-                    AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+                    AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
                     print("Static public IP {} has been removed".format(static_public_ip.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IPs", str(err))
         sys.exit(1)
 
     print("Removing disks")
     try:
-        for disk in AzureMeta().list_disks(resource_group_name):
+        for disk in AzureMeta.list_disks(resource_group_name):
             try:
                 if project_tag in disk.tags["project_tag"]:
-                    AzureActions().remove_disk(resource_group_name, disk.name)
+                    AzureActions.remove_disk(resource_group_name, disk.name)
                     print("Disk {} has been removed".format(disk.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove disks", str(err))
         sys.exit(1)
 
     print("Removing storage account")
     try:
-        for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+        for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
             try:
                 if project_tag == storage_account.tags["project_tag"]:
-                    AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+                    AzureActions.remove_storage_account(resource_group_name, storage_account.name)
                     print("Storage account {} has been terminated".format(storage_account.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove storage accounts", str(err))
         sys.exit(1)
 
     print("Deleting Data Lake Store directory")
     try:
-        for datalake in AzureMeta().list_datalakes(resource_group_name):
+        for datalake in AzureMeta.list_datalakes(resource_group_name):
             try:
                 if service_base_name == datalake.tags["SBN"]:
-                    AzureActions().remove_datalake_directory(datalake.name, project_tag + '-folder')
+                    AzureActions.remove_datalake_directory(datalake.name, project_tag + '-folder')
                     print("Data Lake Store directory {} has been deleted".format(project_tag + '-folder'))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove Data Lake", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+        for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
             try:
                 if project_tag == sg.tags["project_tag"]:
-                    AzureActions().remove_security_group(resource_group_name, sg.name)
+                    AzureActions.remove_security_group(resource_group_name, sg.name)
                     print("Security group {} has been terminated".format(sg.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        AzureActions().remove_subnet(resource_group_name, vpc_name, subnet_name)
+        AzureActions.remove_subnet(resource_group_name, vpc_name, subnet_name)
         print("Private subnet {} has been terminated".format(subnet_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnet", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     edge_conf = dict()
     edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    edge_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-    edge_conf['project_name'] = os.environ['project_name'].replace('_', '-')
-    edge_conf['project_tag'] = os.environ['project_name'].replace('_', '-')
-    edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + '-subnet'
+    edge_conf['user_name'] = os.environ['edge_user_name']
+    edge_conf['project_name'] = os.environ['project_name']
+    edge_conf['project_tag'] = edge_conf['project_name']
+    edge_conf['endpoint_name'] = os.environ['endpoint_name']
+    edge_conf['private_subnet_name'] = "{}-{}-{}-subnet".format(edge_conf['service_base_name'],
+                                                                edge_conf['project_name'], edge_conf['endpoint_name'])
     edge_conf['vpc_name'] = os.environ['azure_vpc_name']
 
 
@@ -153,10 +162,11 @@
         try:
             terminate_edge_node(edge_conf['resource_group_name'], edge_conf['service_base_name'],
                                 edge_conf['project_tag'], edge_conf['private_subnet_name'], edge_conf['vpc_name'])
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
-    except:
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to terminate edge.", str(err))
         sys.exit(1)
 
     try:
@@ -166,6 +176,6 @@
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
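
edge_terminate.py also standardizes error propagation: the inner handler only prints the traceback and re-raises, so a single outer handler records the failure via append_result and sets the exit code. The control flow, reduced to a sketch (terminate_edge_node here is a stand-in for any provisioning step):

    import sys
    import traceback
    import dlab.fab  # project module providing append_result()

    def terminate_edge_node():          # stand-in for any provisioning step
        raise RuntimeError('termination failed')

    try:
        try:
            terminate_edge_node()
        except:
            traceback.print_exc()       # full stack trace goes to the log
            raise Exception             # propagate to the single reporting handler
    except Exception as err:
        dlab.fab.append_result("Failed to terminate edge.", str(err))
        sys.exit(1)
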
diff --git a/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
index 4785939..49f9872 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -38,53 +40,57 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
-        notebook_config['instance_name'] = '{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'],
-                                                                notebook_config['exploratory_name'])
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            notebook_config['endpoint_name'],
-            notebook_config['project_name'],
-            os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
-                                       "SBN": notebook_config['service_base_name'],
-                                       "User": notebook_config['user_name'],
-                                       "project_tag": notebook_config['project_tag'],
-                                       "endpoint_tag": notebook_config['endpoint_tag'],
-                                       "Exploratory": notebook_config['exploratory_name'],
-                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "project_tag": notebook_config['project_tag'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         else:
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
                 notebook_config['service_base_name'],
                 notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
-                                       "SBN": notebook_config['service_base_name'],
-                                       "User": notebook_config['user_name'],
-                                       "endpoint_tag": notebook_config['endpoint_tag'],
-                                       "Exploratory": notebook_config['exploratory_name'],
-                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -93,45 +99,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                    edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -139,9 +145,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -158,9 +163,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -176,9 +180,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring jupiter and all dependencies
@@ -189,7 +192,7 @@
                  "--region {2} --spark_version {3} " \
                  "--hadoop_version {4} --os_user {5} " \
                  "--scala_version {6} --r_mirror {7} " \
-                 "--ip_adress {8} --exploratory_name {9} --edge_ip {10}".\
+                 "--ip_address {8} --exploratory_name {9} --edge_ip {10}".\
             format(instance_hostname, keyfile_name,
                    os.environ['azure_region'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
@@ -197,15 +200,14 @@
                    notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -218,12 +220,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -235,12 +236,11 @@
             # local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -256,44 +256,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image from notebook.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -318,23 +319,22 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # generating output information
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-        jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             notebook_config['exploratory_name'])
-        jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
             notebook_config['exploratory_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
@@ -358,9 +358,9 @@
                    "notebook_image_name": notebook_config['notebook_image_name'],
                    "exploratory_url": [
                        {"description": "Jupyter",
-                        "url": jupyter_notebook_acces_url},
+                        "url": jupyter_notebook_access_url},
                        {"description": "Ungit",
-                        "url": jupyter_ungit_acces_url}#,
+                        "url": jupyter_ungit_access_url}#,
                        #{"description": "Jupyter (via tunnel)",
                        # "url": jupyter_ip_url},
                        #{"description": "Ungit (via tunnel)",
@@ -368,6 +368,6 @@
                    ]}
             result.write(json.dumps(res))
     except Exception as err:
-        append_result("Failed to generate output information", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
new file mode 100644
index 0000000..4c44bbd
--- /dev/null
+++ b/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+import json
+import sys
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+from fabric.api import *
+import traceback
+
+
+if __name__ == "__main__":
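+    # Build a per-run log file under /logs/<resource>/ so each provisioning request is traceable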
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
+        notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
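+        # Project-scoped images include the project name and project tag; shared images are scoped to the endpoint only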
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "project_tag": notebook_config['project_tag'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['tags'] = {"Name": notebook_config['instance_name'],
+                                   "SBN": notebook_config['service_base_name'],
+                                   "User": notebook_config['user_name'],
+                                   "project_tag": notebook_config['project_tag'],
+                                   "endpoint_tag": notebook_config['endpoint_tag'],
+                                   "Exploratory": notebook_config['exploratory_name'],
+                                   os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
+
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
+        if os.environ['conf_network_type'] == 'private':
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
+        else:
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
+
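+        # The initial login user depends on the base image OS family; the DLab service user is created from it below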
+        if os.environ['conf_os_family'] == 'debian':
+            initial_user = 'ubuntu'
+            sudo_group = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            initial_user = 'ec2-user'
+            sudo_group = 'wheel'
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        logging.info('[CREATING DLAB SSH USER]')
+        print('[CREATING DLAB SSH USER]')
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
+            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
+             notebook_config['dlab_ssh_user'], sudo_group)
+
+        try:
+            local("~/scripts/{}.py {}".format('create_ssh_user', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    # configuring proxy on Notebook instance
+    try:
+        logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
+        print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
+        additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
+        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_proxy', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    # updating repositories & installing python packages
+    try:
+        logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
+        print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
+        params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".\
+            format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['azure_region'],
+                   edge_instance_private_hostname)
+        try:
+            local("~/scripts/{}.py {}".format('install_prerequisites', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    # installing and configuring jupyterlab and all dependencies
+    try:
+        logging.info('[CONFIGURE JUPYTERLAB NOTEBOOK INSTANCE]')
+        print('[CONFIGURE JUPYTERLAB NOTEBOOK INSTANCE]')
+        params = "--hostname {0} --keyfile {1} " \
+                 "--region {2} --spark_version {3} " \
+                 "--hadoop_version {4} --os_user {5} " \
+                 "--scala_version {6} --r_mirror {7} " \
+                 "--ip_address {8} --exploratory_name {9} --edge_ip {10}".\
+            format(instance_hostname, keyfile_name,
+                   os.environ['azure_region'], os.environ['notebook_spark_version'],
+                   os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
+                   os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'],
+                   notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
+        try:
+            local("~/scripts/{}.py {}".format('configure_jupyterlab_node', params))
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure JupyterLab node.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        print('[INSTALLING USERs KEY]')
+        logging.info('[INSTALLING USERs KEY]')
+        additional_config = {"user_keyname": notebook_config['user_keyname'],
+                             "user_keydir": os.environ['conf_key_dir']}
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('install_user_key', params))
+        except:
+            dlab.fab.append_result("Failed installing users key")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    try:
+        print('[SETUP USER GIT CREDENTIALS]')
+        logging.info('[SETUP USER GIT CREDENTIALS]')
+        params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
+            .format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
+        try:
+            # local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
+            local("~/scripts/{}.py {}".format('manage_git_creds', params))
+        except:
+            dlab.fab.append_result("Failed setup git credentials")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
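+    # If image creation is enabled and no image exists yet, capture one from this VM and re-create the notebook from it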
+    if notebook_config['image_enabled'] == 'true':
+        try:
+            print('[CREATING IMAGE]')
+            image = AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            if image == '':
+                print("Looks like it's the first time we configure the notebook server. Creating an image.")
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
+                print("Image was successfully created.")
+                local("~/scripts/{}.py".format('common_prepare_notebook'))
+                instance_running = False
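+                # Poll until the re-created VM reports 'running'; there is no timeout, so a stuck VM blocks here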
+                while not instance_running:
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
+                        instance_running = True
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
+                additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
+                params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
+                    .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
+                            json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+                local("~/scripts/{}.py {}".format('common_configure_proxy', params))
+        except Exception as err:
+            dlab.fab.append_result("Failed creating image from notebook.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            sys.exit(1)
+
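+    # Register the notebook behind the EDGE reverse proxy so it is reachable through the EDGE host URL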
+    try:
+        print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        additional_info = {
+            'instance_hostname': instance_hostname,
+            'tensor': False
+        }
+        params = "--edge_hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} " \
+                 "--type {} " \
+                 "--exploratory_name {} " \
+                 "--additional_info '{}'"\
+            .format(edge_instance_private_hostname,
+                    keyfile_name,
+                    notebook_config['dlab_ssh_user'],
+                    'jupyter',
+                    notebook_config['exploratory_name'],
+                    json.dumps(additional_info))
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
+        except:
+            dlab.fab.append_result("Failed edge reverse proxy template")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
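+    # JupyterLab runs in a Docker container, so the Docker daemon itself also needs the EDGE proxy settings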
+    try:
+        print('[CONFIGURING PROXY FOR DOCKER]')
+        logging.info('[CONFIGURING PROXY FOR DOCKER]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   keyfile_name,
+                   notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/configure_proxy_for_docker.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
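+    # Start the JupyterLab container on the notebook VM via the helper script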
+    try:
+        print('[STARTING JUPYTERLAB CONTAINER]')
+        logging.info('[STARTING JUPYTERLAB CONTAINER]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   keyfile_name,
+                   notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/jupyterlab_container_start.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to start JupyterLab container.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
+
+    # generating output information
+    try:
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_size']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+
+        with open("/root/result.json", 'w') as result:
+            res = {"ip": ip_address,
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "instance_id": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
index fca9b2f..e2d481d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
@@ -22,16 +22,22 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import re
 import traceback
 from Crypto.PublicKey import RSA
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+from fabric.api import *
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/project/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
@@ -39,107 +45,132 @@
 
     try:
         print('Generating infrastructure names and tags')
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         project_conf = dict()
-        project_conf['service_base_name'] = os.environ['conf_service_base_name']
-        project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-        project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-        project_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-        project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+        project_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        project_conf['project_name'] = (os.environ['project_name'])
+        project_conf['project_tag'] = project_conf['project_name']
+        project_conf['endpoint_name'] = (os.environ['endpoint_name'])
+        project_conf['endpoint_tag'] = project_conf['endpoint_name']
         project_conf['resource_group_name'] = os.environ['azure_resource_group_name']
 
         project_conf['azure_ad_user_name'] = os.environ['azure_iam_user']
         project_conf['key_name'] = os.environ['conf_key_name']
-
+        project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
         project_conf['vpc_name'] = os.environ['azure_vpc_name']
         project_conf['subnet_name'] = os.environ['azure_subnet_name']
-        project_conf['private_subnet_name'] = project_conf['service_base_name'] + '-' + project_conf['project_name'] + '-subnet'
+        project_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(project_conf['service_base_name'],
+                                                                       project_conf['project_name'],
+                                                                       project_conf['endpoint_name'])
         if os.environ['conf_network_type'] == 'private':
             project_conf['static_public_ip_name'] = 'None'
         else:
-            project_conf['static_public_ip_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-edge-ip'
+            project_conf['static_public_ip_name'] = '{}-{}-{}-edge-static-ip'.format(project_conf['service_base_name'],
+                                                                                     project_conf['project_name'],
+                                                                                     project_conf['endpoint_name'])
         project_conf['region'] = os.environ['azure_region']
         project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-        project_conf['private_subnet_prefix'] = os.environ['azure_private_subnet_prefix']
+        project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
 
         project_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(project_conf['service_base_name'],
                                                                   project_conf['project_name'],
                                                                   project_conf['endpoint_tag'])
         project_conf['network_interface_name'] = '{0}-nif'.format(project_conf['instance_name'])
-        project_conf['primary_disk_name'] = project_conf['instance_name'] + '-disk0'
+        project_conf['primary_disk_name'] = project_conf['instance_name'] + '-volume-0'
         project_conf['edge_security_group_name'] = project_conf['instance_name'] + '-sg'
-        project_conf['notebook_security_group_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name']\
-            + '-nb-sg'
-        project_conf['master_security_group_name'] = project_conf['service_base_name'] + '-' \
-                                                    + project_conf['project_name'] + '-dataengine-master-sg'
-        project_conf['slave_security_group_name'] = project_conf['service_base_name'] + '-' \
-                                                   + project_conf['project_name'] + '-dataengine-slave-sg'
-        project_conf['edge_storage_account_name'] = '{0}-{1}-{2}-storage'.format(project_conf['service_base_name'],
+        project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
+                                                                               project_conf['project_name'],
+                                                                               project_conf['endpoint_name'])
+        project_conf['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(project_conf['service_base_name'],
+                                                                                    project_conf['project_name'],
+                                                                                    project_conf['endpoint_name'])
+        project_conf['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(project_conf['service_base_name'],
+                                                                                  project_conf['project_name'],
+                                                                                  project_conf['endpoint_name'])
+        project_conf['edge_storage_account_name'] = ('{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
                                                                                  project_conf['project_name'],
-                                                                                 project_conf['endpoint_name'])
-        project_conf['edge_container_name'] = (project_conf['service_base_name'] + '-' + project_conf['project_name'] +
-                                            '-container').lower()
-        project_conf['datalake_store_name'] = project_conf['service_base_name'] + '-ssn-datalake'
-        project_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(project_conf['service_base_name'],
-                                                                            project_conf['project_name'])
+                                                                                 project_conf['endpoint_name'])).lower()
+        project_conf['edge_container_name'] = ('{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+                                                                           project_conf['project_name'],
+                                                                           project_conf['endpoint_name'])).lower()
+        project_conf['datalake_store_name'] = '{}-ssn-datalake'.format(project_conf['service_base_name'])
+        project_conf['datalake_user_directory_name'] = '{0}-{1}-{2}-folder'.format(project_conf['service_base_name'],
+                                                                                   project_conf['project_name'],
+                                                                                   project_conf['endpoint_name'])
         ssh_key_path = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + '.pem'
         key = RSA.importKey(open(ssh_key_path, 'rb').read())
         project_conf['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
         project_conf['instance_storage_account_type'] = 'Premium_LRS'
         project_conf['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
         project_conf['instance_tags'] = {"Name": project_conf['instance_name'],
-                                        "SBN": project_conf['service_base_name'],
-                                        "project_tag": project_conf['project_tag'],
-                                        "endpoint_tag": project_conf['endpoint_tag'],
-                                        os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                                         "SBN": project_conf['service_base_name'],
+                                         "project_tag": project_conf['project_tag'],
+                                         "endpoint_tag": project_conf['endpoint_tag'],
+                                         os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         project_conf['storage_account_tags'] = {"Name": project_conf['edge_storage_account_name'],
                                                 "SBN": project_conf['service_base_name'],
                                                 "project_tag": project_conf['project_tag'],
                                                 "endpoint_tag": project_conf['endpoint_tag'],
-                                                os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                                                os.environ['conf_billing_tag_key']:
+                                                    os.environ['conf_billing_tag_value'],
+                                                project_conf['tag_name']: project_conf['edge_storage_account_name']}
         project_conf['primary_disk_size'] = '32'
+        project_conf['shared_storage_account_name'] = ('{0}-{1}-shared-bucket'.format(
+            project_conf['service_base_name'], project_conf['endpoint_name'])).lower()
+        project_conf['shared_container_name'] = ('{}-{}-shared-bucket'.format(project_conf['service_base_name'],
+                                                                              project_conf['endpoint_name'])).lower()
+        project_conf['shared_storage_account_tags'] = {"Name": project_conf['shared_storage_account_name'],
+                                                       "SBN": project_conf['service_base_name'],
+                                                       "endpoint_tag": project_conf['endpoint_tag'],
+                                                       os.environ['conf_billing_tag_key']:
+                                                           os.environ['conf_billing_tag_value'],
+                                                       project_conf['tag_name']:
+                                                           project_conf['shared_storage_account_name']}
 
         # FUSE in case of absence of user's key
         try:
             project_conf['user_key'] = os.environ['key']
             try:
                 local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
-                                                        os.environ['project_name']))
+                                                        project_conf['project_name']))
             except:
                 print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
         except KeyError:
             print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
             sys.exit(1)
 
-        print("Will create exploratory environment with edge node as access point as following: {}".format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+        print("Will create exploratory environment with edge node as access point as following: {}".format(json.dumps(
+            project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
         logging.info(json.dumps(project_conf))
     except Exception as err:
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATE SUBNET]')
         print('[CREATE SUBNET]')
         params = "--resource_group_name {} --vpc_name {} --region {} --vpc_cidr {} --subnet_name {} --prefix {}".\
-            format(project_conf['resource_group_name'], project_conf['vpc_name'], project_conf['region'], project_conf['vpc_cidr'],
-                   project_conf['private_subnet_name'], project_conf['private_subnet_prefix'])
+            format(project_conf['resource_group_name'], project_conf['vpc_name'], project_conf['region'],
+                   project_conf['vpc_cidr'], project_conf['private_subnet_name'], project_conf['private_subnet_prefix'])
         try:
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                         project_conf['private_subnet_name'])
+            AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                       project_conf['private_subnet_name'])
         except:
             print("Subnet hasn't been created.")
-        append_result("Failed to create subnet.", str(err))
+        dlab.fab.append_result("Failed to create subnet.", str(err))
         sys.exit(1)
 
-    project_conf['private_subnet_cidr'] = AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                                              project_conf['private_subnet_name']).address_prefix
+    project_conf['private_subnet_cidr'] = AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                               project_conf['vpc_name'],
+                                                               project_conf['private_subnet_name']).address_prefix
     print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
 
     try:
@@ -191,6 +222,17 @@
                 "direction": "Inbound"
             },
             {
+                "name": "in-5",
+                "protocol": "Tcp",
+                "source_port_range": "*",
+                "destination_port_range": "443",
+                "source_address_prefix": "*",
+                "destination_address_prefix": "*",
+                "access": "Allow",
+                "priority": 140,
+                "direction": "Inbound"
+            },
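+            # in-5: allow inbound HTTPS (443); presumably added for TLS-terminated access via the EDGE node (assumption)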
+            {
                 "name": "out-1",
                 "protocol": "Tcp",
                 "source_port_range": "*",
@@ -401,20 +443,20 @@
             }
         ]
         params = "--resource_group_name {} --security_group_name {} --region {} --tags '{}' --list_rules '{}'". \
-            format(project_conf['resource_group_name'], project_conf['edge_security_group_name'], project_conf['region'],
-                   json.dumps(project_conf['instance_tags']), json.dumps(edge_list_rules))
+            format(project_conf['resource_group_name'], project_conf['edge_security_group_name'],
+                   project_conf['region'], json.dumps(project_conf['instance_tags']), json.dumps(edge_list_rules))
         try:
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except Exception as err:
-            AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                         project_conf['private_subnet_name'])
+            AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                       project_conf['private_subnet_name'])
             try:
-                AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                     project_conf['edge_security_group_name'])
+                AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                                   project_conf['edge_security_group_name'])
             except:
                 print("Edge Security group hasn't been created.")
             traceback.print_exc()
-            append_result("Failed creating security group for edge node.", str(err))
+            dlab.fab.append_result("Failed creating security group for edge node.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -439,7 +481,8 @@
                 "protocol": "*",
                 "source_port_range": "*",
                 "destination_port_range": "*",
-                "source_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                "source_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                              project_conf['vpc_name'],
                                                               project_conf['subnet_name']).address_prefix,
                 "destination_address_prefix": "*",
                 "access": "Allow",
@@ -474,8 +517,9 @@
                 "source_port_range": "*",
                 "destination_port_range": "*",
                 "source_address_prefix": "*",
-                "destination_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                                              project_conf['subnet_name']).address_prefix,
+                "destination_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                                   project_conf['vpc_name'],
+                                                                   project_conf['subnet_name']).address_prefix,
                 "access": "Allow",
                 "priority": 110,
                 "direction": "Outbound"
@@ -504,21 +548,22 @@
             }
             ]
         params = "--resource_group_name {} --security_group_name {} --region {} --tags '{}' --list_rules '{}'". \
-            format(project_conf['resource_group_name'], project_conf['notebook_security_group_name'], project_conf['region'],
-                   json.dumps(project_conf['instance_tags']), json.dumps(notebook_list_rules))
+            format(project_conf['resource_group_name'], project_conf['notebook_security_group_name'],
+                   project_conf['region'], json.dumps(project_conf['instance_tags']), json.dumps(notebook_list_rules))
         try:
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
+        dlab.fab.append_result("Failed creating security group for private subnet.", str(err))
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
         try:
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['notebook_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['notebook_security_group_name'])
         except:
             print("Notebook Security group hasn't been created.")
         sys.exit(1)
@@ -543,9 +588,9 @@
                 "protocol": "*",
                 "source_port_range": "*",
                 "destination_port_range": "*",
-                "source_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'],
-                                                                project_conf['vpc_name'],
-                                                                project_conf['subnet_name']).address_prefix,
+                "source_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                              project_conf['vpc_name'],
+                                                              project_conf['subnet_name']).address_prefix,
                 "destination_address_prefix": "*",
                 "access": "Allow",
                 "priority": 110,
@@ -579,9 +624,9 @@
                 "source_port_range": "*",
                 "destination_port_range": "*",
                 "source_address_prefix": "*",
-                "destination_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'],
-                                                                     project_conf['vpc_name'],
-                                                                     project_conf['subnet_name']).address_prefix,
+                "destination_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                                   project_conf['vpc_name'],
+                                                                   project_conf['subnet_name']).address_prefix,
                 "access": "Allow",
                 "priority": 110,
                 "direction": "Outbound"
@@ -618,18 +663,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['notebook_security_group_name'])
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
         try:
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['master_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['master_security_group_name'])
         except:
             print("Master Security group hasn't been created.")
-        append_result("Failed to create Security groups. Exception:" + str(err))
+        dlab.fab.append_result("Failed to create Security groups. Exception:" + str(err))
         sys.exit(1)
 
     logging.info('[CREATING SECURITY GROUPS FOR SLAVE NODES]')
@@ -644,20 +689,44 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['master_security_group_name'])
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
         try:
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['slave_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['slave_security_group_name'])
         except:
             print("Slave Security group hasn't been created.")
-        append_result("Failed to create Security groups. Exception:" + str(err))
+        dlab.fab.append_result("Failed to create Security groups. Exception:" + str(err))
+        sys.exit(1)
+
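+    # A shared, endpoint-level storage account and container are created alongside the per-project one;
+    # the "Name" tag is what later lookups and cleanup match on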
+    try:
+        logging.info('[CREATE SHARED STORAGE ACCOUNT AND CONTAINER]')
+        print('[CREATE SHARED STORAGE ACCOUNT AND CONTAINER]')
+        params = "--container_name {} --account_tags '{}' --resource_group_name {} --region {}". \
+            format(project_conf['shared_container_name'], json.dumps(project_conf['shared_storage_account_tags']),
+                   project_conf['resource_group_name'], project_conf['region'])
+        local("~/scripts/{}.py {}".format('common_create_storage_account', params))
+    except Exception as err:
+        dlab.fab.append_result("Failed to create storage account.", str(err))
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['slave_security_group_name'])
+        for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
+            if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
         sys.exit(1)
 
     try:
@@ -673,63 +742,71 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create storage account.", str(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['master_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+        dlab.fab.append_result("Failed to create storage account.", str(err))
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['slave_security_group_name'])
+        for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
             if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+            if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
         sys.exit(1)
 
     if os.environ['azure_datalake_enable'] == 'true':
         try:
             logging.info('[CREATE DATA LAKE STORE DIRECTORY]')
             print('[CREATE DATA LAKE STORE DIRECTORY]')
-            params = "--resource_group_name {} --datalake_name {} --directory_name {} --ad_user {} --service_base_name {}". \
-                format(project_conf['resource_group_name'], project_conf['datalake_store_name'],
-                       project_conf['datalake_user_directory_name'], project_conf['azure_ad_user_name'],
-                       project_conf['service_base_name'])
+            params = "--resource_group_name {} --datalake_name {} --directory_name {} --ad_user {} " \
+                     "--service_base_name {}".format(project_conf['resource_group_name'],
+                                                     project_conf['datalake_store_name'],
+                                                     project_conf['datalake_user_directory_name'],
+                                                     project_conf['azure_ad_user_name'],
+                                                     project_conf['service_base_name'])
             try:
                 local("~/scripts/{}.py {}".format('common_create_datalake_directory', params))
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create Data Lake Store directory.", str(err))
-            AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                         project_conf['private_subnet_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['master_security_group_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                     project_conf['slave_security_group_name'])
-            for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+            dlab.fab.append_result("Failed to create Data Lake Store directory.", str(err))
+            AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                       project_conf['private_subnet_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['edge_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['notebook_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['master_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['slave_security_group_name'])
+            for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
                 if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
-                    AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                    AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
+                    AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
             try:
-                for datalake in AzureMeta().list_datalakes(project_conf['resource_group_name']):
+                for datalake in AzureMeta.list_datalakes(project_conf['resource_group_name']):
                     if project_conf['datalake_store_name'] == datalake.tags["Name"]:
-                        AzureActions().remove_datalake_directory(datalake.name, project_conf['datalake_user_directory_name'])
-            except Exception as err:
-                print('Error: {0}'.format(err))
+                        AzureActions.remove_datalake_directory(datalake.name,
+                                                               project_conf['datalake_user_directory_name'])
+            except:
                 print("Data Lake Store directory hasn't been created.")
             sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
+        project_conf['initial_user'] = 'ubuntu'
+        project_conf['sudo_group'] = 'sudo'
     if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
+        project_conf['initial_user'] = 'ec2-user'
+        project_conf['sudo_group'] = 'wheel'
 
     try:
         logging.info('[CREATE EDGE INSTANCE]')
@@ -739,10 +816,12 @@
             --dlab_ssh_user_name {} --public_ip_name {} --public_key '''{}''' --primary_disk_size {} \
             --instance_type {} --project_name {} --instance_storage_account_type {} --image_name {} --tags '{}'".\
             format(project_conf['instance_name'], os.environ['azure_edge_instance_size'], project_conf['region'],
-                   project_conf['vpc_name'], project_conf['network_interface_name'], project_conf['edge_security_group_name'],
-                   project_conf['subnet_name'], project_conf['service_base_name'], project_conf['resource_group_name'],
-                   initial_user, project_conf['static_public_ip_name'], project_conf['public_ssh_key'],
-                   project_conf['primary_disk_size'], 'edge', project_conf['project_name'], project_conf['instance_storage_account_type'],
+                   project_conf['vpc_name'], project_conf['network_interface_name'],
+                   project_conf['edge_security_group_name'], project_conf['subnet_name'],
+                   project_conf['service_base_name'], project_conf['resource_group_name'],
+                   project_conf['initial_user'], project_conf['static_public_ip_name'], project_conf['public_ssh_key'],
+                   project_conf['primary_disk_size'], 'edge', project_conf['project_name'],
+                   project_conf['instance_storage_account_type'],
                    project_conf['image_name'], json.dumps(project_conf['instance_tags']))
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
@@ -750,25 +829,29 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_instance(project_conf['resource_group_name'], project_conf['instance_name'])
+            AzureActions.remove_instance(project_conf['resource_group_name'], project_conf['instance_name'])
         except:
             print("The instance hasn't been created.")
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['master_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['slave_security_group_name'])
+        for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
             if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+            if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
         if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(project_conf['resource_group_name']):
+            for datalake in AzureMeta.list_datalakes(project_conf['resource_group_name']):
                 if project_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, project_conf['datalake_user_directory_name'])
-        append_result("Failed to create instance. Exception:" + str(err))
+                    AzureActions.remove_datalake_directory(datalake.name,
+                                                           project_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed to create instance. Exception:" + str(err))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
index f1f07a0..765959f 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
@@ -22,127 +22,149 @@
 # ******************************************************************************
 
 import json
-import sys, time, os
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import logging
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import requests
+import traceback
 
 
 def terminate_edge_node(resource_group_name, service_base_name, project_tag, subnet_name, vpc_name):
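+    # Tear down project resources in order: VMs, NICs, static IPs, disks, storage accounts, Data Lake directory, images, security groups, private subnet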
     print("Terminating EDGE, notebook and dataengine virtual machines")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             try:
                 if project_tag == vm.tags["project_tag"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate edge instance.", str(err))
         sys.exit(1)
 
     print("Removing network interfaces")
     try:
-        for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+        for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
             try:
                 if project_tag == network_interface.tags["project_name"]:
-                    AzureActions().delete_network_if(resource_group_name, network_interface.name)
+                    AzureActions.delete_network_if(resource_group_name, network_interface.name)
                     print("Network interface {} has been removed".format(network_interface.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove network interfaces.", str(err))
         sys.exit(1)
 
     print("Removing static public IPs")
     try:
-        for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+        for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
             try:
                 if project_tag in static_public_ip.tags["project_tag"]:
-                    AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+                    AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
                     print("Static public IP {} has been removed".format(static_public_ip.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IP addresses.", str(err))
         sys.exit(1)
 
     print("Removing disks")
     try:
-        for disk in AzureMeta().list_disks(resource_group_name):
+        for disk in AzureMeta.list_disks(resource_group_name):
             try:
                 if project_tag in disk.tags["project_tag"]:
-                    AzureActions().remove_disk(resource_group_name, disk.name)
+                    AzureActions.remove_disk(resource_group_name, disk.name)
                     print("Disk {} has been removed".format(disk.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove volumes.", str(err))
         sys.exit(1)
 
     print("Removing storage account")
     try:
-        for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+        for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
             try:
                 if project_tag == storage_account.tags["project_tag"]:
-                    AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+                    AzureActions.remove_storage_account(resource_group_name, storage_account.name)
                     print("Storage account {} has been terminated".format(storage_account.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove storage accounts.", str(err))
         sys.exit(1)
 
     print("Deleting Data Lake Store directory")
     try:
-        for datalake in AzureMeta().list_datalakes(resource_group_name):
+        for datalake in AzureMeta.list_datalakes(resource_group_name):
             try:
                 if service_base_name == datalake.tags["SBN"]:
-                    AzureActions().remove_datalake_directory(datalake.name, project_tag + '-folder')
+                    AzureActions.remove_datalake_directory(datalake.name, project_tag + '-folder')
                     print("Data Lake Store directory {} has been deleted".format(project_tag + '-folder'))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove Data Lake.", str(err))
+        sys.exit(1)
+
+    print("Removing project specific images")
+    try:
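+        # Remove only images tagged with this deployment's SBN and this project's project_tag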
+        for image in AzureMeta.list_images():
+            if service_base_name == image.tags["SBN"] and 'project_tag' in image.tags \
+                    and project_tag == image.tags["project_tag"]:
+                AzureActions.remove_image(resource_group_name, image.name)
+                print("Image {} has been removed".format(image.name))
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove images", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+        for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
             try:
                 if project_tag == sg.tags["project_tag"]:
-                    AzureActions().remove_security_group(resource_group_name, sg.name)
+                    AzureActions.remove_security_group(resource_group_name, sg.name)
                     print("Security group {} has been terminated".format(sg.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups.", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        AzureActions().remove_subnet(resource_group_name, vpc_name, subnet_name)
+        AzureActions.remove_subnet(resource_group_name, vpc_name, subnet_name)
         print("Private subnet {} has been terminated".format(subnet_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnets.", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     project_conf = dict()
     project_conf['service_base_name'] = os.environ['conf_service_base_name']
     project_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    project_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    project_conf['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-    project_conf['private_subnet_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-subnet'
+    project_conf['project_name'] = os.environ['project_name']
+    project_conf['project_tag'] = project_conf['project_name']
+    project_conf['endpoint_name'] = os.environ['endpoint_name']
+    project_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(project_conf['service_base_name'],
+                                                                   project_conf['project_name'],
+                                                                   project_conf['endpoint_name'])
     project_conf['vpc_name'] = os.environ['azure_vpc_name']
 
 
@@ -151,20 +173,60 @@
         print('[TERMINATE EDGE]')
         try:
             terminate_edge_node(project_conf['resource_group_name'], project_conf['service_base_name'],
-                                project_conf['project_tag'], project_conf['private_subnet_name'], project_conf['vpc_name'])
+                                project_conf['project_tag'], project_conf['private_subnet_name'],
+                                project_conf['vpc_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
+            dlab.fab.append_result("Failed to terminate edge.", str(err))
+            raise Exception
     except:
         sys.exit(1)
 
     try:
+        print('[KEYCLOAK PROJECT CLIENT DELETE]')
+        logging.info('[KEYCLOAK PROJECT CLIENT DELETE]')
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            os.environ['keycloak_auth_server_url'])
+        keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+                                                                    os.environ['keycloak_realm_name'])
+
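+        # Authenticate against the master realm with the built-in admin-cli client to obtain an admin token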
+        keycloak_auth_data = {
+            "username": os.environ['keycloak_user'],
+            "password": os.environ['keycloak_user_password'],
+            "grant_type": "password",
+            "client_id": "admin-cli",
+        }
+
+        client_params = {
+            "clientId": "{}-{}-{}".format(project_conf['service_base_name'], project_conf['project_name'],
+                                          project_conf['endpoint_name'])
+        }
+
+        keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
+
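+        # Look up the client's internal ID by its clientId, then delete it via the Keycloak admin REST API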
+        keycloak_get_id_client = requests.get(keycloak_client_url, data=keycloak_auth_data, params=client_params,
+                                              headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
+                                                       "Content-Type": "application/json"})
+        json_keycloak_client_id = json.loads(keycloak_get_id_client.text)
+        keycloak_id_client = json_keycloak_client_id[0]['id']
+
+        keycloak_client_delete_url = '{0}/admin/realms/{1}/clients/{2}'.format(os.environ['keycloak_auth_server_url'],
+                                                                               os.environ['keycloak_realm_name'],
+                                                                               keycloak_id_client)
+
+        keycloak_client = requests.delete(keycloak_client_delete_url,
+                                          headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
+                                                   "Content-Type": "application/json"})
+    except Exception as err:
+        print("Failed to remove project client from Keycloak", str(err))
+
+    try:
         with open("/root/result.json", 'w') as result:
             res = {"service_base_name": os.environ['conf_service_base_name'],
                    "project_name": project_conf['project_name'],
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
index 4c8588c..8487238 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,33 +41,36 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
-        notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'],
-                                                                notebook_config['exploratory_name'])
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "project_tag": notebook_config['project_tag'],
@@ -77,15 +82,16 @@
                 notebook_config['service_base_name'],
                 notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "endpoint_tag": notebook_config['endpoint_tag'],
                                              "Exploratory": notebook_config['exploratory_name'],
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -94,46 +100,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                           notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
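+        # The edge VM is reachable via its Azure DNS label, which follows the host-<instance_name>.<region>.cloudapp.azure.com pattern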
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        notebook_config['rstudio_pass'] = id_generator()
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                           edge_instance_name)
-
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
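+        # The default login user and sudo group depend on the base image's OS family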
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +146,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -160,9 +164,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -178,9 +181,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring R_STUDIO and all dependencies
@@ -190,7 +192,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9} " \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9} " \
             .format(instance_hostname, keyfile_name,
                     os.environ['azure_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -198,15 +200,14 @@
                     notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_rstudio_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure rstudio.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure rstudio.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -222,9 +223,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -235,12 +235,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -256,49 +255,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
-
-                params = "--hostname {} --keyfile {} --os_user {} --rstudio_pass {}" \
-                    .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
-                            notebook_config['rstudio_pass'])
-                local("~/scripts/{}.py {}".format('rstudio_change_pass', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -323,23 +318,22 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         # generating output information
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         rstudio_ip_url = "http://" + ip_address + ":8787/"
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-        rstudio_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+        rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             notebook_config['exploratory_name'])
-        rstudio_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+        rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
             notebook_config['exploratory_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
@@ -365,9 +359,9 @@
                    "Action": "Create new notebook server",
                    "exploratory_url": [
                        {"description": "RStudio",
-                        "url": rstudio_notebook_acces_url},
+                        "url": rstudio_notebook_access_url},
                        {"description": "Ungit",
-                        "url": rstudio_ungit_acces_url}#,
+                        "url": rstudio_ungit_access_url}#,
                        #{"description": "RStudio (via tunnel)",
                        # "url": rstudio_ip_url},
                        #{"description": "Ungit (via tunnel)",
@@ -377,7 +371,6 @@
                    "exploratory_pass": notebook_config['rstudio_pass']}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
index fbf96c4..dbfd10d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
@@ -21,12 +21,15 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os, json
+import sys
+import os
+import json
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
 import traceback
 
 if __name__ == "__main__":
@@ -36,44 +39,71 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
+    def clear_resources():
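+        # Roll back SSN resources created so far; pre-existing resources supplied via environment variables are left in place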
+        AzureActions.remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
+        for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
+            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
+                AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+        if 'azure_security_group_name' not in os.environ:
+            AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+        if 'azure_subnet_name' not in os.environ:
+            AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                       ssn_conf['subnet_name'])
+        if 'azure_vpc_name' not in os.environ:
+            AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+        if 'azure_resource_group_name' not in os.environ:
+            AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
+
+
     try:
-        instance = 'ssn'
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
+        ssn_conf = dict()
+        ssn_conf['instance'] = 'ssn'
         
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
 
-        billing_enabled = True
-
-        ssn_conf = dict()
-        # We need to cut service_base_name to 12 symbols do to the Azure Name length limitation
-        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
+        ssn_conf['billing_enabled'] = True
+        # We need to truncate service_base_name to 20 characters due to the Azure name length limitation
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         # Check azure predefined resources
-        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name', ssn_conf['service_base_name'])
+        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name',
+                                                         '{}-resource-group'.format(ssn_conf['service_base_name']))
         ssn_conf['vpc_name'] = os.environ.get('azure_vpc_name', '{}-vpc'.format(ssn_conf['service_base_name']))
-        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-ssn-subnet'.format(ssn_conf['service_base_name']))
-        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(ssn_conf['service_base_name']))
+        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-subnet'.format(ssn_conf['service_base_name']))
+        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(
+            ssn_conf['service_base_name']))
         # Default variables
         ssn_conf['region'] = os.environ['azure_region']
-        ssn_conf['ssn_storage_account_name'] = '{}-ssn-storage'.format(ssn_conf['service_base_name'])
-        ssn_conf['ssn_container_name'] = '{}-ssn-container'.format(ssn_conf['service_base_name']).lower()
         ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
-        ssn_conf['shared_storage_account_name'] = '{0}-{1}-shared-storage'.format(ssn_conf['service_base_name'],
-                                                                                  ssn_conf['default_endpoint_name'])
-        ssn_conf['shared_container_name'] = '{}-shared-container'.format(ssn_conf['service_base_name']).lower()
         ssn_conf['datalake_store_name'] = '{}-ssn-datalake'.format(ssn_conf['service_base_name'])
         ssn_conf['datalake_shared_directory_name'] = '{}-shared-folder'.format(ssn_conf['service_base_name'])
         ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
-        
-        ssn_conf['ssh_key_path'] = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + '.pem'
+        ssn_conf['ssh_key_path'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        ssn_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(ssn_conf['instance_name'],
+                                                                               ssn_conf['region'])
         if os.environ['conf_network_type'] == 'private':
-            ssn_conf['instnace_ip'] = AzureMeta().get_private_ip_address(ssn_conf['resource_group_name'],
-                                                                        ssn_conf['instance_name'])
+            ssn_conf['instnace_ip'] = AzureMeta.get_private_ip_address(ssn_conf['resource_group_name'],
+                                                                       ssn_conf['instance_name'])
+            ssn_conf['instance_host'] = ssn_conf['instnace_ip']
         else:
-            ssn_conf['instnace_ip'] = AzureMeta().get_instance_public_ip_address(ssn_conf['resource_group_name'],
-                                                                        ssn_conf['instance_name'])
-        ssn_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(ssn_conf['instance_name'], ssn_conf['region'])
+            ssn_conf['instnace_ip'] = AzureMeta.get_instance_public_ip_address(ssn_conf['resource_group_name'],
+                                                                               ssn_conf['instance_name'])
+            ssn_conf['instance_host'] = ssn_conf['instance_dns_name']
+
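+        # With step certificates enabled, collect SANs: the private IP always, plus the public IP and DNS name for public deployments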
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            ssn_conf['step_cert_sans'] = ' --san {0} '.format(AzureMeta.get_private_ip_address(
+                ssn_conf['resource_group_name'], ssn_conf['instance_name']))
+            if os.environ['conf_network_type'] == 'public':
+                ssn_conf['step_cert_sans'] += ' --san {0} --san {1} '.format(
+                    AzureMeta.get_instance_public_ip_address(ssn_conf['resource_group_name'],
+                                                             ssn_conf['instance_name']),
+                    ssn_conf['instance_dns_name'])
+        else:
+            ssn_conf['step_cert_sans'] = ''
 
         try:
             if os.environ['azure_offer_number'] == '':
@@ -85,67 +115,49 @@
             if os.environ['azure_region_info'] == '':
                 raise KeyError
         except KeyError:
-            billing_enabled = False
-        if not billing_enabled:
+            ssn_conf['billing_enabled'] = False
+        if not ssn_conf['billing_enabled']:
             os.environ['azure_offer_number'] = 'None'
             os.environ['azure_currency'] = 'None'
             os.environ['azure_locale'] = 'None'
             os.environ['azure_region_info'] = 'None'
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            ssn_conf['initial_user'] = 'ubuntu'
+            ssn_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            ssn_conf['initial_user'] = 'ec2-user'
+            ssn_conf['sudo_group'] = 'wheel'
     except Exception as err:
-        print("Failed to generate variables dictionary." + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
         sys.exit(1)
 
-    def clear_resources():
-        if 'azure_resource_group_name' not in os.environ:
-            AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-        if 'azure_vpc_name' not in os.environ:
-            AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-        if 'azure_subnet_name' not in os.environ:
-            AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                            ssn_conf['subnet_name'])
-        if 'azure_security_group_name' not in os.environ:
-            AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(ssn_conf['resource_group_name']):
-            if ssn_conf['ssn_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-            if ssn_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-        for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
-            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
-        AzureActions().remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
-
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
         params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], initial_user, ssn_conf['dlab_ssh_user'], sudo_group)
+            (ssn_conf['instance_host'], ssn_conf['ssh_key_path'], ssn_conf['initial_user'], ssn_conf['dlab_ssh_user'],
+             ssn_conf['sudo_group'])
         local("~/scripts/{}.py {}".format('create_ssh_user', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
         print('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
-        params = "--hostname {} --keyfile {} --pip_packages 'backoff argparse fabric==1.14.0 pymongo pyyaml pycrypto azure==2.0.0' \
-            --user {} --region {}".format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'],
-                                          ssn_conf['dlab_ssh_user'], ssn_conf['region'])
+        params = "--hostname {} --keyfile {} --pip_packages 'backoff argparse fabric==1.14.0 pymongo pyyaml " \
+                 "pycrypto azure==2.0.0' --user {} --region {}".format(ssn_conf['instance_host'],
+                                                                       ssn_conf['ssh_key_path'],
+                                                                       ssn_conf['dlab_ssh_user'],
+                                                                       ssn_conf['region'])
         local("~/scripts/{}.py {}".format('install_prerequisites', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Failed installing software: pip, packages.", str(err))
+        dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
         sys.exit(1)
 
     try:
@@ -155,15 +167,16 @@
                              "service_base_name": ssn_conf['service_base_name'],
                              "security_group_id": ssn_conf['security_group_name'], "vpc_id": ssn_conf['vpc_name'],
                              "subnet_id": ssn_conf['subnet_name'], "admin_key": os.environ['conf_key_name']}
-        params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} --tag_resource_id {}". \
-            format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
-                   ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
+                 "--tag_resource_id {} --step_cert_sans '{}'". \
+            format(ssn_conf['instance_host'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
+                   ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'],
+                   ssn_conf['step_cert_sans'])
         local("~/scripts/{}.py {}".format('configure_ssn_node', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Failed configuring ssn.", str(err))
+        dlab.fab.append_result("Failed configuring ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -173,33 +186,34 @@
                              {"name": "edge", "tag": "latest"},
                              {"name": "project", "tag": "latest"},
                              {"name": "jupyter", "tag": "latest"},
+                             {"name": "jupyterlab", "tag": "latest"},
                              {"name": "rstudio", "tag": "latest"},
                              {"name": "zeppelin", "tag": "latest"},
                              {"name": "tensor", "tag": "latest"},
                              {"name": "deeplearning", "tag": "latest"},
                              {"name": "dataengine", "tag": "latest"}]
-        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} --cloud_provider {} --region {}". \
-            format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
-                   os.environ['conf_os_family'], ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
-                   os.environ['conf_cloud_provider'], ssn_conf['region'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
+                 "--cloud_provider {} --region {}".format(ssn_conf['instance_host'], ssn_conf['ssh_key_path'],
+                                                          json.dumps(additional_config), os.environ['conf_os_family'],
+                                                          ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
+                                                          os.environ['conf_cloud_provider'], ssn_conf['region'])
         local("~/scripts/{}.py {}".format('configure_docker', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Unable to configure docker.", str(err))
+        dlab.fab.append_result("Unable to configure docker.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE SSN INSTANCE UI]')
         print('[CONFIGURE SSN INSTANCE UI]')
-        azure_auth_path = '/home/{}/keys/azure_auth.json'.format(ssn_conf['dlab_ssh_user'])
-        ldap_login = 'false'
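+        # Path on the SSN instance where the Azure service principal credentials file is expected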
+        ssn_conf['azure_auth_path'] = '/home/{}/keys/azure_auth.json'.format(ssn_conf['dlab_ssh_user'])
+        ssn_conf['ldap_login'] = 'false'
 
         cloud_params = [
             {
                 'key': 'KEYCLOAK_REDIRECT_URI',
-                'value': "http://{0}/".format(ssn_conf['instnace_ip'])
+                'value': "https://{0}/".format(ssn_conf['instance_host'])
             },
             {
                 'key': 'KEYCLOAK_REALM_NAME',
@@ -218,6 +232,14 @@
                 'value': os.environ['keycloak_client_secret']
             },
             {
+                'key': 'KEYCLOAK_USER_NAME',
+                'value': os.environ['keycloak_user']
+            },
+            {
+                'key': 'KEYCLOAK_PASSWORD',
+                'value': os.environ['keycloak_user_password']
+            },
+            {
                 'key': 'CONF_OS',
                 'value': os.environ['conf_os_family']
             },
@@ -286,14 +308,6 @@
                 'value': ssn_conf['resource_group_name']
             },
             {
-                'key': 'AZURE_SSN_STORAGE_ACCOUNT_TAG',
-                'value': ssn_conf['ssn_storage_account_name']
-            },
-            {
-                'key': 'AZURE_SHARED_STORAGE_ACCOUNT_TAG',
-                'value': ssn_conf['shared_storage_account_name']
-            },
-            {
                 'key': 'GCP_PROJECT_ID',
                 'value': ''
             },
@@ -314,11 +328,64 @@
                 'value': os.environ['conf_image_enabled']
             },
             {
-                'key': 'SHARED_IMAGE_ENABLED',
-                'value': os.environ['conf_shared_image_enabled']
+                'key': "AZURE_AUTH_FILE_PATH",
+                'value': ""
             }
         ]
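+        # Forward step-ca settings to the UI; when step certificates are disabled, empty values are passed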
 
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            cloud_params.append(
+                {
+                    'key': 'STEP_CERTS_ENABLED',
+                    'value': os.environ['conf_stepcerts_enabled']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_ROOT_CA',
+                    'value': os.environ['conf_stepcerts_root_ca']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_ID',
+                    'value': os.environ['conf_stepcerts_kid']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_PASSWORD',
+                    'value': os.environ['conf_stepcerts_kid_password']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_CA_URL',
+                    'value': os.environ['conf_stepcerts_ca_url']
+                })
+        else:
+            cloud_params.append(
+                {
+                    'key': 'STEP_CERTS_ENABLED',
+                    'value': 'false'
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_ROOT_CA',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_ID',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_PASSWORD',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_CA_URL',
+                    'value': ''
+                })
+
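+        # Without Data Lake, the UI falls back to LDAP login unless OAuth2 is explicitly enabled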
         if os.environ['azure_datalake_enable'] == 'false':
             cloud_params.append(
                 {
@@ -331,11 +398,11 @@
                     'value': ''
                 })
             if os.environ['azure_oauth2_enabled'] == 'false':
-                ldap_login = 'true'
-            tenant_id = json.dumps(AzureMeta().sp_creds['tenantId']).replace('"', '')
-            subscription_id = json.dumps(AzureMeta().sp_creds['subscriptionId']).replace('"', '')
-            datalake_application_id = os.environ['azure_application_id']
-            datalake_store_name = None
+                ssn_conf['ldap_login'] = 'true'
+            ssn_conf['tenant_id'] = json.dumps(AzureMeta.sp_creds['tenantId']).replace('"', '')
+            ssn_conf['subscription_id'] = json.dumps(AzureMeta.sp_creds['subscriptionId']).replace('"', '')
+            ssn_conf['datalake_application_id'] = os.environ['azure_application_id']
+            ssn_conf['datalake_store_name'] = None
+            ssn_conf['datalake_store_full_name'] = None
         else:
             cloud_params.append(
                 {
@@ -347,39 +414,36 @@
                     'key': 'AZURE_CLIENT_ID',
                     'value': os.environ['azure_application_id']
                 })
-            tenant_id = json.dumps(AzureMeta().sp_creds['tenantId']).replace('"', '')
-            subscription_id = json.dumps(AzureMeta().sp_creds['subscriptionId']).replace('"', '')
-            datalake_application_id = os.environ['azure_application_id']
-            for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+            ssn_conf['tenant_id'] = json.dumps(AzureMeta.sp_creds['tenantId']).replace('"', '')
+            ssn_conf['subscription_id'] = json.dumps(AzureMeta.sp_creds['subscriptionId']).replace('"', '')
+            ssn_conf['datalake_application_id'] = os.environ['azure_application_id']
+            ssn_conf['datalake_store_full_name'] = None
+            for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
                 if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    datalake_store_name = datalake.name
+                    ssn_conf['datalake_store_full_name'] = datalake.name
         params = "--hostname {} --keyfile {} --dlab_path {} --os_user {} --os_family {} --request_id {} \
                  --resource {} --service_base_name {} --cloud_provider {} --billing_enabled {} --authentication_file {} \
                  --offer_number {} --currency {} --locale {} --region_info {}  --ldap_login {} --tenant_id {} \
                  --application_id {} --datalake_store_name {} --cloud_params '{}' --subscription_id {}  \
-                 --validate_permission_scope {} --default_endpoint_name {}". \
+                 --validate_permission_scope {} --default_endpoint_name {} --keycloak_client_id {} \
+                 --keycloak_client_secret {} --keycloak_auth_server_url {}". \
-            format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'],
+            format(ssn_conf['instance_ip'], ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'],
                    ssn_conf['dlab_ssh_user'], os.environ['conf_os_family'], os.environ['request_id'],
                    os.environ['conf_resource'], ssn_conf['service_base_name'], os.environ['conf_cloud_provider'],
-                   billing_enabled, azure_auth_path, os.environ['azure_offer_number'],
+                   ssn_conf['billing_enabled'], ssn_conf['azure_auth_path'], os.environ['azure_offer_number'],
                    os.environ['azure_currency'], os.environ['azure_locale'], os.environ['azure_region_info'],
-                   ldap_login, tenant_id, datalake_application_id, datalake_store_name, json.dumps(cloud_params),
-                   subscription_id, os.environ['azure_validate_permission_scope'], ssn_conf['default_endpoint_name'])
+                   ssn_conf['ldap_login'], ssn_conf['tenant_id'], ssn_conf['datalake_application_id'],
+                   ssn_conf['datalake_store_full_name'], json.dumps(cloud_params), ssn_conf['subscription_id'],
+                   os.environ['azure_validate_permission_scope'], ssn_conf['default_endpoint_name'],
+                   os.environ['keycloak_client_name'], os.environ['keycloak_client_secret'],
+                   os.environ['keycloak_auth_server_url'])
         local("~/scripts/{}.py {}".format('configure_ui', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Unable to configure UI.", str(err))
+        dlab.fab.append_result("Unable to configure UI.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[SUMMARY]')
-        for storage_account in AzureMeta().list_storage_accounts(ssn_conf['resource_group_name']):
-            if ssn_conf['ssn_storage_account_name'] == storage_account.tags["Name"]:
-                ssn_storage_account_name = storage_account.name
-            if ssn_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                shared_storage_account_name = storage_account.name
 
         print('[SUMMARY]')
         print("Service base name: {}".format(ssn_conf['service_base_name']))
@@ -392,25 +456,22 @@
         print("Key name: {}".format(os.environ['conf_key_name']))
         print("VPC Name: {}".format(ssn_conf['vpc_name']))
         print("Subnet Name: {}".format(ssn_conf['subnet_name']))
-        print("Firewall Names: {}".format(ssn_conf['security_group_name']))
+        print("Security groups Names: {}".format(ssn_conf['security_group_name']))
         print("SSN instance size: {}".format(os.environ['azure_ssn_instance_size']))
-        print("SSN storage account name: {}".format(ssn_storage_account_name))
-        print("SSN container name: {}".format(ssn_conf['ssn_container_name']))
-        print("Shared storage account name: {}".format(shared_storage_account_name))
-        print("Shared container name: {}".format(ssn_conf['shared_container_name']))
+        ssn_conf['datalake_store_full_name'] = 'None'
         if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+            for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
                 if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    datalake_store_name = datalake.name
-            print("DataLake store name: {}".format(datalake_store_name))
+                    ssn_conf['datalake_store_full_name'] = datalake.name
+                    print("DataLake store name: {}".format(ssn_conf['datalake_store_full_name']))
             print("DataLake shared directory name: {}".format(ssn_conf['datalake_shared_directory_name']))
         print("Region: {}".format(ssn_conf['region']))
-        jenkins_url = "http://{}/jenkins".format(ssn_conf['instnace_ip'])
-        jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instnace_ip'])
+        jenkins_url = "http://{}/jenkins".format(ssn_conf['instance_host'])
+        jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instance_host'])
         print("Jenkins URL: {}".format(jenkins_url))
         print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
-        print("DLab UI HTTP URL: http://{}".format(ssn_conf['instnace_ip']))
-        print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instnace_ip']))
+        print("DLab UI HTTP URL: http://{}".format(ssn_conf['instance_host']))
+        print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instance_host']))
 
         try:
             with open('jenkins_creds.txt') as f:
@@ -423,32 +484,24 @@
             if os.environ['azure_datalake_enable'] == 'false':
                 res = {"service_base_name": ssn_conf['service_base_name'],
                        "instance_name": ssn_conf['instance_name'],
-                       "instance_hostname": ssn_conf['instnace_ip'],
+                       "instance_hostname": ssn_conf['instance_host'],
                        "master_keyname": os.environ['conf_key_name'],
                        "vpc_id": ssn_conf['vpc_name'],
                        "subnet_id": ssn_conf['subnet_name'],
                        "security_id": ssn_conf['security_group_name'],
                        "instance_shape": os.environ['azure_ssn_instance_size'],
-                       "ssn_storage_account_name": ssn_storage_account_name,
-                       "ssn_container_name": ssn_conf['ssn_container_name'],
-                       "shared_storage_account_name": shared_storage_account_name,
-                       "shared_container_name": ssn_conf['shared_container_name'],
                        "region": ssn_conf['region'],
                        "action": "Create SSN instance"}
             else:
                 res = {"service_base_name": ssn_conf['service_base_name'],
                        "instance_name": ssn_conf['instance_name'],
-                       "instance_hostname": ssn_conf['instnace_ip'],
+                       "instance_hostname": ssn_conf['instance_host'],
                        "master_keyname": os.environ['conf_key_name'],
                        "vpc_id": ssn_conf['vpc_name'],
                        "subnet_id": ssn_conf['subnet_name'],
                        "security_id": ssn_conf['security_group_name'],
                        "instance_shape": os.environ['azure_ssn_instance_size'],
-                       "ssn_storage_account_name": ssn_storage_account_name,
-                       "ssn_container_name": ssn_conf['ssn_container_name'],
-                       "shared_storage_account_name": shared_storage_account_name,
-                       "shared_container_name": ssn_conf['shared_container_name'],
-                       "datalake_name": datalake_store_name,
+                       "datalake_name": ssn_conf['datalake_store_full_name'],
                        "datalake_shared_directory_name": ssn_conf['datalake_shared_directory_name'],
                        "region": ssn_conf['region'],
                        "action": "Create SSN instance"}
@@ -458,5 +511,6 @@
         params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
-            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'], ssn_conf['instnace_ip'])
+            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'], ssn_conf['instance_ip'])
         local("~/scripts/{}.py {}".format('upload_response_file', params))
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
index e9dff4d..408f423 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
@@ -21,13 +21,17 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os, json
+import sys
+import os
+import json
 from fabric.api import *
-from dlab.ssn_lib import *
 from Crypto.PublicKey import RSA
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
 
 
 if __name__ == "__main__":
@@ -37,37 +41,38 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
-        instance = 'ssn'
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
+        ssn_conf = dict()
+        ssn_conf['instance'] = 'ssn'
 
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-
-        ssn_conf = dict()
         # Verify vpc deployment
-        if os.environ['conf_network_type'] == 'private' and os.environ.get('azure_vpc_name') == None and os.environ.get('azure_source_vpc_name') == None:
+        if os.environ['conf_network_type'] == 'private' and not os.environ.get('azure_vpc_name') \
+                and not os.environ.get('azure_source_vpc_name'):
             raise Exception('Not possible to deploy private environment without predefined vpc or without source vpc')
-        if os.environ['conf_network_type'] == 'private' and os.environ.get('azure_resource_group_name') == None and os.environ.get('azure_source_resource_group_name') == None:
-            raise Exception('Not possible to deploy private environment without predefined resource_group_name or source_group_name')
-        # We need to cut service_base_name to 12 symbols do to the Azure Name length limitation
-        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
+        if os.environ['conf_network_type'] == 'private' and not os.environ.get('azure_resource_group_name') \
+                and not os.environ.get('azure_source_resource_group_name'):
+            raise Exception('Not possible to deploy private environment without predefined resource_group_name '
+                            'or source_group_name')
+        # We need to cut service_base_name to 20 symbols due to the Azure name length limitation
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         # Check azure predefined resources
-        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name', ssn_conf['service_base_name'])
-        ssn_conf['source_resource_group_name'] = os.environ.get('azure_source_resource_group_name', ssn_conf['resource_group_name'])
+        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name',
+                                                         '{}-resource-group'.format(ssn_conf['service_base_name']))
+        ssn_conf['source_resource_group_name'] = os.environ.get(
+            'azure_source_resource_group_name', '{}-resource-group'.format(ssn_conf['service_base_name']))
         ssn_conf['vpc_name'] = os.environ.get('azure_vpc_name', '{}-vpc'.format(ssn_conf['service_base_name']))
-        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-ssn-subnet'.format(ssn_conf['service_base_name']))
-        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(ssn_conf['service_base_name']))
+        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-subnet'.format(ssn_conf['service_base_name']))
+        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name',
+                                                         '{}-sg'.format(ssn_conf['service_base_name']))
         # Default variables
         ssn_conf['region'] = os.environ['azure_region']
         ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
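+        # The SSN subnet is allocated from the VPC CIDR with a /20 prefix (4096 addresses)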
         ssn_conf['subnet_prefix'] = '20'
         ssn_conf['ssn_image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
-        ssn_conf['ssn_storage_account_name'] = '{}-ssn-storage'.format(ssn_conf['service_base_name'])
-        ssn_conf['ssn_container_name'] = '{}-ssn-container'.format(ssn_conf['service_base_name']).lower()
-        ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
-        ssn_conf['shared_storage_account_name'] = '{0}-{1}-shared-storage'.format(ssn_conf['service_base_name'],
-                                                                                  ssn_conf['default_endpoint_name'])
-        ssn_conf['shared_container_name'] = '{}-shared-container'.format(ssn_conf['service_base_name']).lower()
         ssn_conf['datalake_store_name'] = '{}-ssn-datalake'.format(ssn_conf['service_base_name'])
         ssn_conf['datalake_shared_directory_name'] = '{}-shared-folder'.format(ssn_conf['service_base_name'])
         ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
@@ -75,29 +80,25 @@
         if os.environ['conf_network_type'] == 'private':
             ssn_conf['static_public_ip_name'] = 'None'      
         else:
-            ssn_conf['static_public_ip_name'] = '{}-ssn-ip'.format(ssn_conf['service_base_name'])
-        key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']), 'rb').read())
+            ssn_conf['static_public_ip_name'] = '{}-ssn-static-ip'.format(ssn_conf['service_base_name'])
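+        # Import the admin private key to derive the OpenSSH public key injected into the SSN VM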
+        ssn_conf['key'] = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'],
+                                                               os.environ['conf_key_name']), 'rb').read())
         ssn_conf['instance_storage_account_type'] = 'Premium_LRS'
-        ssn_conf['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
+        ssn_conf['public_ssh_key'] = ssn_conf['key'].publickey().exportKey("OpenSSH")
         ssn_conf['instance_tags'] = {"Name": ssn_conf['instance_name'],
                                      "SBN": ssn_conf['service_base_name'],
                                      os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        ssn_conf['ssn_storage_account_tags'] = {"Name": ssn_conf['ssn_storage_account_name'],
-                                                "SBN": ssn_conf['service_base_name'],
-                                                os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        ssn_conf['shared_storage_account_tags'] = {"Name": ssn_conf['shared_storage_account_name'],
-                                                   "SBN": ssn_conf['service_base_name'],
-                                                   os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+
         ssn_conf['datalake_store_tags'] = {"Name": ssn_conf['datalake_store_name'],
                                            "SBN": ssn_conf['service_base_name'],
                                            os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         ssn_conf['primary_disk_size'] = '32'
     except Exception as err:
-        print("Failed to generate variables dictionary." + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
-    if AzureMeta().get_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name']):
-        print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+    if AzureMeta.get_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name']):
+        dlab.fab.append_result("Service base name should be unique and less or equal 20 symbols. Please try again.")
         sys.exit(1)
 
     try:
@@ -111,8 +112,7 @@
             local("~/scripts/{}.py {}".format('ssn_create_resource_group', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating resource group: ' + str(err))
-        append_result("Failed to create Resource Group. Exception: " + str(err))
+        dlab.fab.append_result("Failed to create Resource Group.", str(err))
         sys.exit(1)
     
     try:
@@ -127,13 +127,12 @@
             local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating VPC: ' + str(err))
+        dlab.fab.append_result("Failed to create VPC.", str(err))
         try:
             if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
-            print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create VPC. Exception: " + str(err))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
         sys.exit(1)
   
     try:
@@ -149,15 +148,15 @@
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating Subnet: ' + str(err))
+        dlab.fab.append_result("Failed to create Subnet.", str(err))
         try:
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
             if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
-            print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create Subnet. Exception: " + str(err))
+            print("Resources hasn't been removed: {}".format(str(err)))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
         sys.exit(1)
     
     try:
@@ -165,20 +164,21 @@
             logging.info('[CREATING VPC PEERING]')
             print("[CREATING VPC PEERING]")
             params = "--source_resource_group_name {} --destination_resource_group_name {} " \
-            "--source_virtual_network_name {} --destination_virtual_network_name {}".format(ssn_conf['source_resource_group_name'], 
-                        ssn_conf['resource_group_name'], os.environ['azure_source_vpc_name'], ssn_conf['vpc_name'])
+                     "--source_virtual_network_name {} --destination_virtual_network_name {}".format(
+                      ssn_conf['source_resource_group_name'], ssn_conf['resource_group_name'],
+                      os.environ['azure_source_vpc_name'], ssn_conf['vpc_name'])
             local("~/scripts/{}.py {}".format('ssn_create_peering', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating VPC peering: ' + str(err))
         try:
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
             if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
             print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create VPC peering. Exception: " + str(err))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
+        dlab.fab.append_result("Failed to create VPC peering.", str(err))
         sys.exit(1)
 
     try:
@@ -240,70 +240,18 @@
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating Security group: ' + str(err))
+        dlab.fab.append_result("Error creating Security group", str(err))
         try:
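+            # Roll back partially created resources in reverse order of creation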
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-            if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
             if 'azure_subnet_name' not in os.environ:
-                AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                             ssn_conf['subnet_name'])
+                AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                           ssn_conf['subnet_name'])
+            if 'azure_vpc_name' not in os.environ:
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
             print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create Security group. Exception: " + str(err))
-        sys.exit(1)
-
-    try:
-        logging.info('[CREATE SSN STORAGE ACCOUNT AND CONTAINER]')
-        print('[CREATE SSN STORAGE ACCOUNT AND CONTAINER]')
-        params = "--container_name {} --account_tags '{}' --resource_group_name {} --region {}". \
-                 format(ssn_conf['ssn_container_name'], json.dumps(ssn_conf['ssn_storage_account_tags']),
-                        ssn_conf['resource_group_name'], ssn_conf['region'])
-        local("~/scripts/{}.py {}".format('common_create_storage_account', params))
-    except Exception as err:
-        traceback.print_exc()
-        print('Error: {0}'.format(err))
-        if 'azure_resource_group_name' not in os.environ:
-            AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-        if 'azure_vpc_name' not in os.environ:
-            AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-        if 'azure_subnet_name' not in os.environ:
-            AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                            ssn_conf['subnet_name'])
-        if 'azure_security_group_name' not in os.environ:
-            AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(ssn_conf['resource_group_name']):
-            if ssn_conf['ssn_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-        append_result("Failed to create SSN storage account and container. Exception:" + str(err))
-        sys.exit(1)
-
-    try:
-        logging.info('[CREATE SHARED STORAGE ACCOUNT AND CONTAINER]')
-        print('[CREATE SHARED STORAGE ACCOUNT AND CONTAINER]')
-        params = "--container_name {} --account_tags '{}' --resource_group_name {} --region {}". \
-            format(ssn_conf['shared_container_name'], json.dumps(ssn_conf['shared_storage_account_tags']),
-                   ssn_conf['resource_group_name'], ssn_conf['region'])
-        local("~/scripts/{}.py {}".format('common_create_storage_account', params))
-    except Exception as err:
-        traceback.print_exc()
-        print('Error: {0}'.format(err))
-        if 'azure_resource_group_name' not in os.environ:
-            AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-        if 'azure_vpc_name' not in os.environ:
-            AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-        if 'azure_subnet_name' not in os.environ:
-            AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                            ssn_conf['subnet_name'])
-        if 'azure_security_group_name' not in os.environ:
-            AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(ssn_conf['resource_group_name']):
-            if ssn_conf['ssn_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-            if ssn_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-        append_result("Failed to create SSN storage account and container. Exception:" + str(err))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
         sys.exit(1)
 
     if os.environ['azure_datalake_enable'] == 'true':
@@ -332,23 +280,19 @@
                 raise Exception
         except Exception as err:
             traceback.print_exc()
-            print('Error: {0}'.format(err))
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-            if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-            if 'azure_subnet_name' not in os.environ:
-                AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                                ssn_conf['subnet_name'])
-            if 'azure_security_group_name' not in os.environ:
-                AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-            for storage_account in AzureMeta().list_storage_accounts(ssn_conf['resource_group_name']):
-                if ssn_conf['ssn_storage_account_name'] == storage_account.tags["Name"]:
-                    AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-            for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+            dlab.fab.append_result("Failed to create Data Lake Store.", str(err))
+            for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
                 if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
-            append_result("Failed to create Data Lake Store. Exception:" + str(err))
+                    AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+            if 'azure_security_group_name' not in os.environ:
+                AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+            if 'azure_subnet_name' not in os.environ:
+                AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                           ssn_conf['subnet_name'])
+            if 'azure_vpc_name' not in os.environ:
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
             sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
@@ -374,27 +318,22 @@
         local("~/scripts/{}.py {}".format('common_create_instance', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error: {0}'.format(err))
-        if 'azure_resource_group_name' not in os.environ:
-            AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-        if 'azure_vpc_name' not in os.environ:
-            AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-        if 'azure_subnet_name' not in os.environ:
-            AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                            ssn_conf['subnet_name'])
-        if 'azure_security_group_name' not in os.environ:
-            AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(ssn_conf['resource_group_name']):
-            if ssn_conf['ssn_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-            if ssn_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(ssn_conf['resource_group_name'], storage_account.name)
-        for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
-            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+        dlab.fab.append_result("Failed to create instance.", str(err))
         try:
-            AzureActions().remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
+            AzureActions.remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
         except:
             print("The instance {} hasn't been created".format(ssn_conf['instance_name']))
-        append_result("Failed to create instance. Exception:" + str(err))
-        sys.exit(1)
\ No newline at end of file
+        for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
+            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
+                AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+        if 'azure_security_group_name' not in os.environ:
+            AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+        if 'azure_subnet_name' not in os.environ:
+            AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                       ssn_conf['subnet_name'])
+        if 'azure_vpc_name' not in os.environ:
+            AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+        if 'azure_resource_group_name' not in os.environ:
+            AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
+
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
index bf2f91e..c709929 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
@@ -21,111 +21,115 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import sys
+import os
 from fabric.api import *
-from dlab.ssn_lib import *
+import logging
+import traceback
+import json
 
 
 def terminate_ssn_node(resource_group_name, service_base_name, vpc_name, region):
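+    # Resources are removed in dependency order: instances, network interfaces, static IPs,
+    # disks, storage accounts, Data Lake stores, images, security groups, the VPC and the resource group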
     print("Terminating instances")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if service_base_name == vm.tags["SBN"]:
-                AzureActions().remove_instance(resource_group_name, vm.name)
+                AzureActions.remove_instance(resource_group_name, vm.name)
                 print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instances", str(err))
         sys.exit(1)
 
     print("Removing network interfaces")
     try:
-        for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+        for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
             if service_base_name == network_interface.tags["SBN"]:
-                AzureActions().delete_network_if(resource_group_name, network_interface.name)
+                AzureActions.delete_network_if(resource_group_name, network_interface.name)
                 print("Network interface {} has been removed".format(network_interface.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove network interfaces", str(err))
         sys.exit(1)
 
     print("Removing static public IPs")
     try:
-        for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+        for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
             if service_base_name == static_public_ip.tags["SBN"]:
-                AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+                AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
                 print("Static public IP {} has been removed".format(static_public_ip.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IPs", str(err))
         sys.exit(1)
 
     print("Removing disks")
     try:
-        for disk in AzureMeta().list_disks(resource_group_name):
+        for disk in AzureMeta.list_disks(resource_group_name):
             if service_base_name == disk.tags["SBN"]:
-                AzureActions().remove_disk(resource_group_name, disk.name)
+                AzureActions.remove_disk(resource_group_name, disk.name)
                 print("Disk {} has been removed".format(disk.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove disks", str(err))
         sys.exit(1)
 
     print("Removing storage accounts")
     try:
-        for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+        for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
             if service_base_name == storage_account.tags["SBN"]:
-                AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+                AzureActions.remove_storage_account(resource_group_name, storage_account.name)
                 print("Storage account {} has been terminated".format(storage_account.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove storage accounts", str(err))
         sys.exit(1)
 
     print("Removing Data Lake Store")
     try:
-        for datalake in AzureMeta().list_datalakes(resource_group_name):
+        for datalake in AzureMeta.list_datalakes(resource_group_name):
             if service_base_name == datalake.tags["SBN"]:
-                AzureActions().delete_datalake_store(resource_group_name, datalake.name)
+                AzureActions.delete_datalake_store(resource_group_name, datalake.name)
                 print("Data Lake Store {} has been terminated".format(datalake.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove Data Lake", str(err))
         sys.exit(1)
 
     print("Removing images")
     try:
-        for image in AzureMeta().list_images():
+        for image in AzureMeta.list_images():
             if service_base_name == image.tags["SBN"]:
-                AzureActions().remove_image(resource_group_name, image.name)
+                AzureActions.remove_image(resource_group_name, image.name)
                 print("Image {} has been removed".format(image.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove images", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+        for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
             if service_base_name == sg.tags["SBN"]:
-                AzureActions().remove_security_group(resource_group_name, sg.name)
+                AzureActions.remove_security_group(resource_group_name, sg.name)
                 print("Security group {} has been terminated".format(sg.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing VPC")
     try:
-        if AzureMeta().get_vpc(resource_group_name, service_base_name + '-vpc'):
-            AzureActions().remove_vpc(resource_group_name, vpc_name)
+        if AzureMeta.get_vpc(resource_group_name, service_base_name + '-vpc'):
+            AzureActions.remove_vpc(resource_group_name, vpc_name)
             print("VPC {} has been terminated".format(vpc_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove VPC", str(err))
         sys.exit(1)
 
     print("Removing Resource Group")
     try:
-        if AzureMeta().get_resource_group(service_base_name):
-            AzureActions().remove_resource_group(service_base_name, region)
+        if AzureMeta.get_resource_group(resource_group_name):
+            AzureActions.remove_resource_group(resource_group_name, region)
             print("Resource group {} has been terminated".format(vpc_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove resource group", str(err))
         sys.exit(1)
 
 
@@ -136,12 +140,14 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     ssn_conf = dict()
-    ssn_conf['service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
-    ssn_conf['resource_group_name'] = replace_multi_symbols(
-        os.environ['azure_resource_group_name'].replace('_', '-')[:12], '-', True)
+    ssn_conf['service_base_name'] = dlab.fab.replace_multi_symbols(os.environ['conf_service_base_name'][:20],
+                                                                   '-', True)
+    ssn_conf['resource_group_name'] = os.environ.get(
+        'azure_resource_group_name', '{}-resource-group'.format(ssn_conf['service_base_name']))
     ssn_conf['region'] = os.environ['azure_region']
     ssn_conf['vpc_name'] = os.environ['azure_vpc_name']
 
@@ -155,8 +161,7 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to terminate ssn.", str(err))
+        dlab.fab.append_result("Failed to terminate ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -165,6 +170,6 @@
                    "Action": "Terminate ssn with all service_base_name environment"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
index 203829a..914f686 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -41,33 +43,36 @@
                         filename=local_log_filepath)
 
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
-        notebook_config['instance_name'] = '{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'],
-                                                                notebook_config['exploratory_name'])
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "project_tag": notebook_config['project_tag'],
@@ -79,15 +84,16 @@
                 notebook_config['service_base_name'],
                 notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "endpoint_tag": notebook_config['endpoint_tag'],
                                              "Exploratory": notebook_config['exploratory_name'],
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -96,44 +102,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                       notebook_config['project_name'],
-                                                       notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+                                                     notebook_config['project_name'],
+                                                     notebook_config['endpoint_name'])
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                    edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        append_result("Failed to generate variables dictionary", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +148,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -152,16 +158,16 @@
         print('[CONFIGURE PROXY ON TENSOR INSTANCE]')
         additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
         params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -177,9 +183,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and all dependencies
@@ -188,21 +193,20 @@
         print('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
         params = "--hostname {0} --keyfile {1} " \
                  "--region {2} --os_user {3} " \
-                 "--ip_adress {4} --exploratory_name {5} --edge_ip {6}" \
+                 "--ip_address {4} --exploratory_name {5} --edge_ip {6}" \
                  .format(instance_hostname, keyfile_name,
                          os.environ['azure_region'], notebook_config['dlab_ssh_user'],
                          notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_tensor_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure TensorFlow.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -218,9 +222,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -231,12 +234,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -252,44 +254,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -314,26 +317,25 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # generating output information
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         tensorboard_url = "http://" + ip_address + ":6006/"
         jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-        jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             notebook_config['exploratory_name'])
-        tensorboard_acces_url = "http://" + edge_instance_hostname + "/{}-tensor/".format(
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
             notebook_config['exploratory_name'])
-        jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
             notebook_config['exploratory_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
@@ -360,11 +362,11 @@
                    "Action": "Create new notebook server",
                    "exploratory_url": [
                        {"description": "Jupyter",
-                        "url": jupyter_notebook_acces_url},
+                        "url": jupyter_notebook_access_url},
                        {"description": "TensorBoard",
-                        "url": tensorboard_acces_url},
+                        "url": tensorboard_access_url},
                        {"description": "Ungit",
-                        "url": jupyter_ungit_acces_url}#,
+                        "url": jupyter_ungit_access_url}#,
                        #{"description": "Jupyter (via tunnel)",
                        # "url": jupyter_ip_url},
                        #{"description": "TensorBoard (via tunnel)",
@@ -374,7 +376,6 @@
                    ]}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
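
Both notebook scripts now build AzureMeta and AzureActions once and reuse the instances instead of constructing AzureMeta()/AzureActions() at every call site, and the public edge address is no longer looked up at all: outside of conf_network_type == 'private', the edge is reached via Azure's deterministic cloudapp DNS label. (One small wart carried over: the rewritten import block here, and the identical one in zeppelin_configure.py below, adds a second 'import traceback' above the pre-existing one.) A sketch of the hostname selection, assuming the edge VM registered the DNS label host-<edge_instance_name> when it was created, which is what the format string implies:

    def resolve_edge_hostname(edge_instance_name, region, network_type, private_ip):
        # Hypothetical helper mirroring the logic above: private deployments keep
        # addressing the edge by private IP, public ones by the predictable
        # <label>.<region>.cloudapp.azure.com name, so no public-IP query is needed.
        dns_name = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name, region)
        return private_ip if network_type == 'private' else dns_name

    # resolve_edge_hostname('sbn-prj-ep-edge', 'westus2', 'public', '10.0.1.4')
    #   -> 'host-sbn-prj-ep-edge.westus2.cloudapp.azure.com'
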
diff --git a/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
index 64e9498..91eb529 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,33 +42,36 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_keyname'] = os.environ['project_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
-        notebook_config['instance_name'] = '{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'],
-                                                                notebook_config['exploratory_name'])
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "project_tag": notebook_config['project_tag'],
@@ -78,15 +83,16 @@
                 notebook_config['service_base_name'],
                 notebook_config['endpoint_name'],
                 os.environ['application'])
-            notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+            notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
                                              "SBN": notebook_config['service_base_name'],
                                              "User": notebook_config['user_name'],
                                              "endpoint_tag": notebook_config['endpoint_tag'],
                                              "Exploratory": notebook_config['exploratory_name'],
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -94,46 +100,46 @@
                                    "project_tag": notebook_config['project_tag'],
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
-                                   "product": "dlab"}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+                                   os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                           edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +147,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -160,9 +165,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -178,9 +182,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring zeppelin and all dependencies
@@ -199,25 +202,24 @@
                  "--zeppelin_version {10} --scala_version {11} " \
                  "--livy_version {12} --multiple_clusters {13} " \
                  "--r_mirror {14} --endpoint_url {15} " \
-                 "--ip_adress {16} --exploratory_name {17} --edge_ip {18} " \
+                 "--ip_address {16} --exploratory_name {17} --edge_ip {18} " \
             .format(instance_hostname, notebook_config['instance_name'], keyfile_name, os.environ['azure_region'],
-                    json.dumps(additional_config), notebook_config['dlab_ssh_user'], os.environ['notebook_spark_version'],
-                    os.environ['notebook_hadoop_version'], edge_instance_private_hostname, '3128',
-                    os.environ['notebook_zeppelin_version'], os.environ['notebook_scala_version'],
-                    os.environ['notebook_livy_version'], os.environ['notebook_multiple_clusters'],
-                    os.environ['notebook_r_mirror'], 'null',
+                    json.dumps(additional_config), notebook_config['dlab_ssh_user'],
+                    os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
+                    edge_instance_private_hostname, '3128', os.environ['notebook_zeppelin_version'],
+                    os.environ['notebook_scala_version'], os.environ['notebook_livy_version'],
+                    os.environ['notebook_multiple_clusters'], os.environ['notebook_r_mirror'], 'null',
                     notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_zeppelin_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure zeppelin.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -233,9 +235,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -246,12 +247,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -267,44 +267,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -329,23 +330,22 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # generating output information
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         zeppelin_ip_url = "http://" + ip_address + ":8080/"
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-        zeppelin_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+        zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             notebook_config['exploratory_name'])
-        zeppelin_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+        zeppelin_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
             notebook_config['exploratory_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
@@ -369,9 +369,9 @@
                    "Action": "Create new notebook server",
                    "exploratory_url": [
                        {"description": "Apache Zeppelin",
-                        "url": zeppelin_notebook_acces_url},
+                        "url": zeppelin_notebook_access_url},
                        {"description": "Ungit",
-                        "url": zeppelin_ungit_acces_url}#,
+                        "url": zeppelin_ungit_access_url}#,
                        #{"description": "Apache Zeppelin (via tunnel)",
                        # "url": zeppelin_ip_url},
                        #{"description": "Ungit (via tunnel)",
@@ -379,7 +379,6 @@
                    ]}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
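
The renames above thread the endpoint into every per-notebook resource name, so two endpoints of the same project no longer collide on instance, security-group, or image names; the per-project image name also switches to project-then-endpoint order, and the image's "Name" tag now carries the image name instead of the instance name. A sketch of the resulting scheme (the helper is invented; the inputs are presumably normalized to lowercase-and-hyphens upstream, since the explicit .lower().replace('_', '-') calls were dropped from these scripts):

    def notebook_resource_names(sbn, project, endpoint, exploratory, application):
        # Endpoint-qualified names as assembled in the hunks above.
        return {
            'instance': '{}-{}-{}-nb-{}'.format(sbn, project, endpoint, exploratory),
            'security_group': '{}-{}-{}-nb-sg'.format(sbn, project, endpoint),
            'image': '{}-{}-{}-{}-notebook-image'.format(sbn, project, endpoint,
                                                         application),
        }

    # notebook_resource_names('dlab', 'prj', 'ep', 'zep1', 'zeppelin')['instance']
    #   -> 'dlab-prj-ep-nb-zep1'
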
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_create_bucket.py b/infrastructure-provisioning/src/general/scripts/gcp/common_create_bucket.py
index c75e718..1ac223f 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_create_bucket.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_create_bucket.py
@@ -30,6 +30,7 @@
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--bucket_name', type=str, default='')
+parser.add_argument('--tags', type=str, default='')
 args = parser.parse_args()
 
 
@@ -40,7 +41,7 @@
         else:
             print("Creating Bucket {}".format(args.bucket_name))
             GCPActions().create_bucket(args.bucket_name)
-            GCPActions().add_bucket_label(args.bucket_name)
+            GCPActions().add_bucket_labels(args.bucket_name, json.loads(args.tags))
     else:
         parser.print_help()
         sys.exit(2)
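
common_create_bucket.py now accepts its labels as a JSON-encoded --tags string and passes the decoded dict to the renamed add_bucket_labels. A self-contained sketch of the calling convention (the values are made up):

    import argparse
    import json

    parser = argparse.ArgumentParser()
    parser.add_argument('--bucket_name', type=str, default='')
    parser.add_argument('--tags', type=str, default='{}')
    args = parser.parse_args(['--bucket_name', 'dlab-prj-ep-bucket',
                              '--tags', json.dumps({"sbn": "dlab", "project": "prj"})])

    # One json.dumps on the caller's side, one json.loads in the script, so
    # arbitrary key/value labels survive the trip through argv.
    labels = json.loads(args.tags)  # -> {'sbn': 'dlab', 'project': 'prj'}

Note that the script's own default for --tags is the empty string, which json.loads rejects, so callers are expected to always supply the flag.
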
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_create_instance.py b/infrastructure-provisioning/src/general/scripts/gcp/common_create_instance.py
index 25b39ae..271b609 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_create_instance.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_create_instance.py
@@ -48,6 +48,7 @@
 parser.add_argument('--gpu_accelerator_type', type=str, default='None')
 parser.add_argument('--network_tag', type=str, default='')
 parser.add_argument('--cluster_name', type=str, default='')
+parser.add_argument('--service_base_name', type=str, default='')
 args = parser.parse_args()
 
 
@@ -57,7 +58,7 @@
             print("REQUESTED INSTANCE {} ALREADY EXISTS".format(args.instance_name))
         else:
             print("Creating Instance {}".format(args.instance_name))
-            GCPActions().create_instance(args.instance_name, args.cluster_name, args.region, args.zone,
+            GCPActions().create_instance(args.instance_name, args.service_base_name, args.cluster_name, args.region, args.zone,
                                          args.vpc_name, args.subnet_name,
                                          args.instance_size, args.ssh_key_path, args.initial_user, args.image_name,
                                          args.secondary_image_name, args.service_account_name, args.instance_class,
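
Like the other CLI wrappers in this directory, the instance script keeps the check-then-create idiom and now threads --service_base_name through to create_instance. The idiom, condensed (meta and actions stand in for GCPMeta() and GCPActions(); the full create_instance argument list is the one shown above):

    def ensure_instance(meta, actions, instance_name, **create_kwargs):
        # Re-runs become no-ops: only create when the lookup finds nothing.
        if meta.get_instance(instance_name):
            print("REQUESTED INSTANCE {} ALREADY EXISTS".format(instance_name))
        else:
            print("Creating Instance {}".format(instance_name))
            actions.create_instance(instance_name, **create_kwargs)
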
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_create_service_account.py b/infrastructure-provisioning/src/general/scripts/gcp/common_create_service_account.py
index a0f5046..ce02d6f 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_create_service_account.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_create_service_account.py
@@ -31,16 +31,18 @@
 parser.add_argument('--role_name', type=str, default='')
 parser.add_argument('--policy_path', type=str, default='')
 parser.add_argument('--roles_path', type=str, default='')
+parser.add_argument('--unique_index', type=str, default='')
+parser.add_argument('--service_base_name', type=str, default='')
 args = parser.parse_args()
 
 
 if __name__ == "__main__":
     if args.service_account_name != '':
-        if GCPMeta().get_service_account(args.service_account_name):
+        if GCPMeta().get_service_account(args.service_account_name, args.service_base_name):
             print("REQUESTED SERVICE ACCOUNT {} ALREADY EXISTS".format(args.service_account_name))
         else:
             print("Creating Service account {}".format(args.service_account_name))
-            GCPActions().create_service_account(args.service_account_name)
+            GCPActions().create_service_account(args.service_account_name, args.service_base_name, args.unique_index)
             if GCPMeta().get_role(args.role_name):
                 if GCPMeta().get_role_status(args.role_name) == True:
                     print('Restoring deleted role')
@@ -57,14 +59,14 @@
                 print("Creating Role {}".format(args.role_name))
                 GCPActions().create_role(args.role_name, permissions)
             print("Assigning custom role to Service account.")
-            GCPActions().set_role_to_service_account(args.service_account_name, args.role_name)
+            GCPActions().set_role_to_service_account(args.service_account_name, args.role_name, args.service_base_name)
             if args.roles_path != '':
                 print("Assigning predefined roles to Service account.")
                 with open(args.roles_path, 'r') as f:
                     json_file = f.read()
                 predefined_roles = json.loads(json_file)
                 for role in predefined_roles:
-                    GCPActions().set_role_to_service_account(args.service_account_name, role, 'predefined')
+                    GCPActions().set_role_to_service_account(args.service_account_name, role, args.service_base_name, 'predefined')
     else:
         parser.print_help()
         sys.exit(2)
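
Service-account handling now carries --service_base_name (and a --unique_index at creation time) through every GCPMeta/GCPActions call, while keeping the same layering: one custom role plus a JSON list of predefined roles. A sketch of the role-granting half (grant_roles is an invented name; the set_role_to_service_account signatures match the hunk above):

    import json

    def grant_roles(actions, service_account_name, role_name, service_base_name,
                    roles_path):
        # One project-specific custom role...
        actions.set_role_to_service_account(service_account_name, role_name,
                                            service_base_name)
        # ...plus every predefined role listed in the JSON file, flagged as such.
        if roles_path:
            with open(roles_path, 'r') as f:
                for role in json.loads(f.read()):
                    actions.set_role_to_service_account(service_account_name, role,
                                                        service_base_name, 'predefined')
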
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_create_subnet.py b/infrastructure-provisioning/src/general/scripts/gcp/common_create_subnet.py
index 7131764..eacf2a1 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_create_subnet.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_create_subnet.py
@@ -36,57 +36,82 @@
 parser.add_argument('--vpc_selflink', type=str, default='')
 parser.add_argument('--prefix', type=str, default='')
 parser.add_argument('--vpc_cidr', type=str, default='')
+parser.add_argument('--ssn', type=bool, default=False)
+parser.add_argument('--user_subnets_range', type=str, default='')
 args = parser.parse_args()
 
 
 if __name__ == "__main__":
-    empty_vpc = False
-    private_subnet_size = ipaddress.ip_network(u'0.0.0.0/{}'.format(args.prefix)).num_addresses
-    subnets_cidr = []
-    try:
-        subnets = GCPMeta().get_vpc(args.vpc_selflink.split('/')[-1])['subnetworks']
-    except KeyError:
-        empty_vpc = True
-        subnets = []
-    for subnet in subnets:
-        subnets_cidr.append(GCPMeta().get_subnet(subnet.split('/')[-1], args.region)['ipCidrRange'])
-    sortkey = lambda addr: \
-        (int(addr.split("/")[0].split(".")[0]),
-         int(addr.split("/")[0].split(".")[1]),
-         int(addr.split("/")[0].split(".")[2]),
-         int(addr.split("/")[0].split(".")[3]),
-         int(addr.split("/")[1]))
-    sorted_subnets_cidr = sorted(subnets_cidr, key=sortkey)
+    if args.user_subnets_range == '' or args.ssn:
+        empty_vpc = False
+        private_subnet_size = ipaddress.ip_network(u'0.0.0.0/{}'.format(args.prefix)).num_addresses
+        subnets_cidr = []
+        try:
+            subnets = GCPMeta().get_vpc(args.vpc_selflink.split('/')[-1])['subnetworks']
+        except KeyError:
+            empty_vpc = True
+            subnets = []
+        for subnet in subnets:
+            subnets_cidr.append(GCPMeta().get_subnet(subnet.split('/')[-1], args.region)['ipCidrRange'])
+        sortkey = lambda addr: \
+            (int(addr.split("/")[0].split(".")[0]),
+             int(addr.split("/")[0].split(".")[1]),
+             int(addr.split("/")[0].split(".")[2]),
+             int(addr.split("/")[0].split(".")[3]),
+             int(addr.split("/")[1]))
+        sorted_subnets_cidr = sorted(subnets_cidr, key=sortkey)
 
-    if not empty_vpc:
-        last_ip = int(ipaddress.IPv4Address(sorted_subnets_cidr[0].split('/')[0].decode("utf-8")))
-    else:
-        last_ip = int(ipaddress.IPv4Address(args.vpc_cidr.split('/')[0].decode("utf-8")))
-    previous_subnet_size = private_subnet_size
-    for cidr in sorted_subnets_cidr:
-        first_ip = int(ipaddress.IPv4Address(cidr.split('/')[0].decode("utf-8")))
-        if first_ip - last_ip < private_subnet_size or previous_subnet_size < private_subnet_size:
-            subnet_size = ipaddress.ip_network(u'{}'.format(cidr)).num_addresses
-            last_ip = first_ip + subnet_size - 1
-            previous_subnet_size = subnet_size
+        if not empty_vpc:
+            last_ip = int(ipaddress.IPv4Address(sorted_subnets_cidr[0].split('/')[0].decode("utf-8")))
         else:
-            break
+            last_ip = int(ipaddress.IPv4Address(args.vpc_cidr.split('/')[0].decode("utf-8")))
+        previous_subnet_size = private_subnet_size
+        for cidr in sorted_subnets_cidr:
+            first_ip = int(ipaddress.IPv4Address(cidr.split('/')[0].decode("utf-8")))
+            if first_ip - last_ip < private_subnet_size or previous_subnet_size < private_subnet_size:
+                subnet_size = ipaddress.ip_network(u'{}'.format(cidr)).num_addresses
+                last_ip = first_ip + subnet_size - 1
+                previous_subnet_size = subnet_size
+            else:
+                break
 
-    dlab_subnet_cidr = ''
-    if empty_vpc:
-        dlab_subnet_cidr = '{0}/{1}'.format(ipaddress.ip_address(last_ip), args.prefix)
-    else:
-        if previous_subnet_size < private_subnet_size:
-            while True:
-                try:
-                    dlab_subnet_cidr = '{0}/{1}'.format(ipaddress.ip_address(last_ip + 1), args.prefix)
-                    ipaddress.ip_network(dlab_subnet_cidr.decode('utf-8'))
-                    break
-                except ValueError:
-                    last_ip = last_ip + 2
-                    continue
+        dlab_subnet_cidr = ''
+        if empty_vpc:
+            dlab_subnet_cidr = '{0}/{1}'.format(ipaddress.ip_address(last_ip), args.prefix)
         else:
-            dlab_subnet_cidr = '{0}/{1}'.format(ipaddress.ip_address(last_ip + 1), args.prefix)
+            if previous_subnet_size < private_subnet_size:
+                while True:
+                    try:
+                        dlab_subnet_cidr = '{0}/{1}'.format(ipaddress.ip_address(last_ip + 1), args.prefix)
+                        ipaddress.ip_network(dlab_subnet_cidr.decode('utf-8'))
+                        break
+                    except ValueError:
+                        last_ip = last_ip + 2
+                        continue
+            else:
+                dlab_subnet_cidr = '{0}/{1}'.format(ipaddress.ip_address(last_ip + 1), args.prefix)
+    else:
+        pre_defined_subnet_list = []
+        subnet_cidr = args.user_subnets_range.split('-')[0].replace(' ', '')
+        pre_defined_subnet_list.append(subnet_cidr)
+        while str(subnet_cidr) != args.user_subnets_range.split('-')[1].replace(' ', ''):
+            subnet = ipaddress.ip_network(u'{}'.format(subnet_cidr))
+            num_addr = subnet.num_addresses
+            first_ip = int(ipaddress.IPv4Address(u'{}'.format(subnet.network_address)))
+            next_subnet = ipaddress.ip_network(u'{}/{}'.format(ipaddress.ip_address(first_ip + num_addr),
+                                                               args.prefix))
+            pre_defined_subnet_list.append(next_subnet.compressed)
+            subnet_cidr = next_subnet
+        existing_subnet_list = []
+        response = GCPMeta().get_vpc(args.vpc_selflink.split('/')[-1])['subnetworks']
+        for subnet in response:
+            existing_subnet_list.append(GCPMeta().get_subnet(subnet.split('/')[-1], args.region)['ipCidrRange'])
+        available_subnets = list(set(pre_defined_subnet_list) - set(existing_subnet_list))
+        if not available_subnets:
+            print("There is no available subnet to create. Aborting...")
+            sys.exit(1)
+        else:
+            dlab_subnet_cidr = available_subnets[0]
 
     if args.subnet_name != '':
         if GCPMeta().get_subnet(args.subnet_name, args.region):
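Note on the new --user_subnets_range branch above: when no range is supplied (or when the SSN itself is being provisioned), the script keeps the original first-fit scan over the sorted in-use CIDRs; with a range, it instead enumerates candidate CIDRs by stepping from the start of the range in whole /<prefix> blocks until the end CIDR is produced, subtracts the subnets already allocated in the VPC, and takes the first leftover block. Because the loop compares compressed CIDR strings, the range end must fall exactly on a /<prefix> boundary or the walk never terminates. A minimal sketch of the enumeration step, assuming a range such as '172.31.0.0/24 - 172.31.3.0/24' (the helper name is illustrative, not part of the script):

    import ipaddress

    def enumerate_candidates(subnets_range, prefix):
        # Step through consecutive /<prefix> blocks from the start CIDR of the
        # range until the end CIDR is produced, mirroring the loop above.
        start, end = [s.replace(' ', '') for s in subnets_range.split('-')]
        candidates = [start]
        cidr = ipaddress.ip_network(u'{}'.format(start))
        while candidates[-1] != end:
            next_net = ipaddress.ip_network(
                u'{}/{}'.format(cidr.network_address + cidr.num_addresses, prefix))
            candidates.append(next_net.compressed)
            cidr = next_net
        return candidates

    print(enumerate_candidates('172.31.0.0/24 - 172.31.3.0/24', 24))
    # ['172.31.0.0/24', '172.31.1.0/24', '172.31.2.0/24', '172.31.3.0/24']

Separately, the new --ssn flag is declared with type=bool, and argparse converts option strings via bool(), so any non-empty value, including 'false', parses as True. A string-to-bool converter is the usual workaround; this is a sketch of the pattern, not a change to the script's CLI:

    import argparse

    def str2bool(value):
        # Accept common affirmative spellings; everything else is False.
        return value.lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser()
    parser.add_argument('--ssn', type=str2bool, default=False)
    print(parser.parse_args(['--ssn', 'false']).ssn)  # False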
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
index 311a805..f39c138 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
+from fabric.api import *
+
+
+def clear_resources():
+    GCPActions.delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
+    GCPActions.remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
+                              os.environ['dataproc_version'], os.environ['conf_os_user'],
+                              notebook_config['key_path'])
 
 
 if __name__ == "__main__":
@@ -40,62 +49,72 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+    notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+    notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    notebook_config['project_tag'] = notebook_config['project_name']
+    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+    notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
     notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                  notebook_config['project_name'],
                                                                  notebook_config['endpoint_name'])
-    notebook_config['cluster_name'] = meta_lib.GCPMeta().get_not_configured_dataproc(notebook_config['notebook_name'])
-    notebook_config['notebook_ip'] = meta_lib.GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+    notebook_config['cluster_name'] = GCPMeta.get_not_configured_dataproc(notebook_config['notebook_name'])
+    notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
     notebook_config['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
     edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                    notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = meta_lib.GCPMeta().get_private_ip_address(edge_instance_name)
+    edge_instance_hostname = GCPMeta.get_private_ip_address(edge_instance_name)
     if os.environ['application'] == 'deeplearning':
         application = 'jupyter'
     else:
         application = os.environ['application']
+
+    additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+        "'}", "").lower()
+
     notebook_config['cluster_labels'] = {
         os.environ['notebook_instance_name']: "configured",
         "name": notebook_config['cluster_name'],
         "sbn": notebook_config['service_base_name'],
-        "user": notebook_config['edge_user_name'],
         "notebook_name": os.environ['notebook_instance_name'],
-        "project_tag": notebook_config['project_tag'],
-        "endpoint_tag": notebook_config['endpoint_tag'],
         "product": "dlab",
-        "computational_name": (os.environ['computational_name']).lower().replace('_', '-')
+        "computational_name": (os.environ['computational_name'].replace('_', '-').lower())
     }
 
+    for tag in additional_tags.split(','):
+        label_key = tag.split(':')[0]
+        label_value = tag.split(':')[1].replace('_', '-')
+        if '@' in label_value:
+            label_value = label_value[:label_value.find('@')]
+        if label_value != '':
+            notebook_config['cluster_labels'].update({label_key: label_value})
+
     try:
         logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
         print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
-        params = "--bucket {} --cluster_name {} --dataproc_version {} --keyfile {} --notebook_ip {} --region {} --edge_user_name {} --project_name {} --os_user {}  --edge_hostname {} --proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
+        params = "--bucket {} --cluster_name {} --dataproc_version {} --keyfile {} --notebook_ip {} --region {} " \
+                 "--edge_user_name {} --project_name {} --os_user {}  --edge_hostname {} --proxy_port {} " \
+                 "--scala_version {} --application {} --pip_mirror {}" \
             .format(notebook_config['bucket_name'], notebook_config['cluster_name'], os.environ['dataproc_version'],
                     notebook_config['key_path'], notebook_config['notebook_ip'], os.environ['gcp_region'],
-                    notebook_config['edge_user_name'], notebook_config['project_name'], os.environ['conf_os_user'], edge_instance_hostname, '3128',
-                    os.environ['notebook_scala_version'], os.environ['application'], os.environ['conf_pypi_mirror'])
+                    notebook_config['edge_user_name'], notebook_config['project_name'], os.environ['conf_os_user'],
+                    edge_instance_hostname, '3128', os.environ['notebook_scala_version'], os.environ['application'],
+                    os.environ['conf_pypi_mirror'])
         try:
             local("~/scripts/{}_{}.py {}".format(application, 'install_dataengine-service_kernels', params))
-            actions_lib.GCPActions().update_dataproc_cluster(notebook_config['cluster_name'],
-                                                             notebook_config['cluster_labels'])
+            GCPActions.update_dataproc_cluster(notebook_config['cluster_name'], notebook_config['cluster_labels'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing Dataproc kernels.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
-        actions_lib.GCPActions().remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
-                                                os.environ['dataproc_version'], os.environ['conf_os_user'], notebook_config['key_path'])
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataproc kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -113,11 +132,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Spark.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
-        actions_lib.GCPActions().remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
-                                                os.environ['dataproc_version'], os.environ['conf_os_user'], notebook_config['key_path'])
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -127,6 +143,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
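The additional_tags handling introduced here flattens a stringified Python dict arriving in os.environ['tags'] into key:value pairs and folds them into the Dataproc cluster labels, replacing the previously hard-coded user/project_tag/endpoint_tag entries. GCP label values must be lowercase and may not contain characters like '@', which is why the values are lower-cased, underscores are normalized to hyphens, and only the local part of an e-mail address is kept. A condensed sketch of the same transformation (the helper name is invented for illustration):

    def tags_to_labels(raw_tags):
        # Flatten "{u'k': u'v', ...}" into "k:v,..." exactly as above, then
        # sanitize each value for use as a GCP label.
        flat = raw_tags.replace("': u'", ":").replace("', u'", ",") \
                       .replace("{u'", "").replace("'}", "").lower()
        labels = {}
        for tag in flat.split(','):
            key, value = tag.split(':', 1)
            value = value.replace('_', '-')
            if '@' in value:
                value = value[:value.find('@')]  # keep only the local part
            if value != '':
                labels[key] = value
        return labels

    print(tags_to_labels("{u'user_tag': u'jon_doe@example.com', u'custom_tag': u'My_Tag'}"))
    # {'user_tag': 'jon-doe', 'custom_tag': 'my-tag'}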
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
index adf1f0b..08c4c02 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
+
+
+def clear_resources():
+    for i in range(notebook_config['instance_count'] - 1):
+        slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+        GCPActions.remove_instance(slave_name, notebook_config['zone'])
+    GCPActions.remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
 
 
 if __name__ == "__main__":
@@ -40,53 +49,53 @@
                         filename=local_log_filepath)
 
     try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
         # generating variables dictionary
         print('Generating infrastructure names and tags')
         notebook_config = dict()
-        try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
             notebook_config['exploratory_name'] = ''
-        try:
-            notebook_config['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
             notebook_config['computational_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['region'] = os.environ['gcp_region']
         notebook_config['zone'] = os.environ['gcp_zone']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
-                                          '-de-' + notebook_config['exploratory_name'] + '-' + \
-                                          notebook_config['computational_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+        notebook_config['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        notebook_config['cluster_name'] = "{}-{}-{}-de-{}".format(notebook_config['service_base_name'],
+                                                                  notebook_config['project_name'],
+                                                                  notebook_config['endpoint_name'],
+                                                                  notebook_config['computational_name'])
         notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
         notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-        notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+        notebook_config['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
         try:
-            notebook_config['spark_master_ip'] = GCPMeta().get_private_ip_address(notebook_config['master_node_name'])
-            notebook_config['notebook_ip'] = GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+            notebook_config['spark_master_ip'] = GCPMeta.get_private_ip_address(notebook_config['master_node_name'])
+            notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
         except Exception as err:
-            print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to get instance IP address", str(err))
             sys.exit(1)
         notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
 
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
-        append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
         sys.exit(1)
 
     try:
         logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
         print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
-        params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4} --keyfile {5}" \
-                 " --notebook_ip {6} --spark_master_ip {7}".\
+        params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
+                 " --keyfile {5} --notebook_ip {6} --spark_master_ip {7}".\
             format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
                    notebook_config['spark_master_url'], notebook_config['key_path'],
@@ -97,12 +106,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
-        append_result("Failed installing Dataengine kernels.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -122,12 +127,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
-        append_result("Failed to configure Spark.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
         sys.exit(1)
 
     try:
@@ -136,6 +137,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
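Both dataengine configure scripts now define clear_resources() once at module level and call it from every failure branch instead of repeating the rollback inline. The helper references GCPActions and notebook_config even though those names are only bound later inside __main__; that is valid because Python resolves globals at call time, not at definition time. The corollary is that clear_resources() must only run after the keys it reads have been populated. The same mechanics in miniature (all names invented):

    import sys

    def clear_resources():
        # notebook_config is a module-level global: it is looked up when the
        # function is called, after __main__ has populated it.
        for i in range(notebook_config['instance_count'] - 1):
            print('removing {}{}'.format(notebook_config['slave_node_name'], i + 1))
        print('removing {}'.format(notebook_config['master_node_name']))

    if __name__ == "__main__":
        notebook_config = {'instance_count': 3,
                           'slave_node_name': 'demo-de-s',
                           'master_node_name': 'demo-de-m'}
        try:
            raise RuntimeError('simulated provisioning failure')
        except Exception:
            clear_resources()
            sys.exit(1)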
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
index 4814c52..c83208b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
@@ -24,10 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import os
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
+
 
 if __name__ == "__main__":
     instance_class = 'notebook'
@@ -37,130 +40,159 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    print('Generating infrastructure names and tags')
-    notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['region'] = os.environ['gcp_region']
-    notebook_config['zone'] = os.environ['gcp_zone']
-
-    edge_status = GCPMeta().get_instance_status('{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                                          notebook_config['project_name'],
-                                                                          notebook_config['endpoint_tag']))
-    if edge_status != 'RUNNING':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = GCPMeta().get_private_ip_address(notebook_config['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                            ssn_hostname)
-        append_result("Edge node is unavailable")
-        sys.exit(1)
-
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        notebook_config = dict()
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['region'] = os.environ['gcp_region']
+        notebook_config['zone'] = os.environ['gcp_zone']
+
+        edge_status = GCPMeta.get_instance_status('{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                                            notebook_config['project_name'],
+                                                                            notebook_config['endpoint_tag']))
+        if edge_status != 'RUNNING':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            ssn_hostname = GCPMeta.get_private_ip_address(notebook_config['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                notebook_config['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            notebook_config['vpc_name'] = '{}-vpc'.format(notebook_config['service_base_name'])
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except KeyError:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['subnet_name'] = '{0}-{1}-{2}-subnet'.format(notebook_config['service_base_name'],
+                                                                     notebook_config['project_name'],
+                                                                     notebook_config['endpoint_tag'])
+        notebook_config['instance_size'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['notebook_service_account_name'] = '{}-{}-{}-ps-sa'.format(notebook_config['service_base_name'],
+                                                                                   notebook_config['project_name'],
+                                                                                   notebook_config['endpoint_name'])
+
+        if os.environ['conf_os_family'] == 'debian':
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(
+            os.environ['application'])
+        notebook_config['secondary_disk_size'] = os.environ['notebook_disk_size']
+
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_tag'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_tag'],
+                os.environ['application'])
         else:
-            notebook_config['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        notebook_config['vpc_name'] = '{}-ssn-vpc'.format(notebook_config['service_base_name'])
-    try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['subnet_name'] = '{0}-{1}-subnet'.format(notebook_config['service_base_name'],
-                                                             notebook_config['project_name'])
-    notebook_config['instance_size'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['notebook_service_account_name'] = '{}-{}-ps'.format(notebook_config['service_base_name'],
-                                                                         notebook_config['project_name']).replace('_', '-')
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+        notebook_config['notebook_primary_image_name'] = \
+            (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
+             else notebook_config['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
+        print('Searching pre-configured images')
+        notebook_config['primary_image_name'] = GCPMeta.get_image_by_name(
+            notebook_config['expected_primary_image_name'])
+        if notebook_config['primary_image_name'] == '':
+            notebook_config['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        else:
+            print('Pre-configured primary image found. Using: {}'.format(
+                notebook_config['primary_image_name'].get('name')))
+            notebook_config['primary_image_name'] = 'global/images/{}'.format(
+                notebook_config['primary_image_name'].get('name'))
 
-    if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
-    if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
-    notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(os.environ['application'])
-    notebook_config['secondary_disk_size'] = os.environ['notebook_disk_size']
+        notebook_config['secondary_image_name'] = GCPMeta.get_image_by_name(
+            notebook_config['expected_secondary_image_name'])
+        if notebook_config['secondary_image_name'] == '':
+            notebook_config['secondary_image_name'] = 'None'
+        else:
+            print('Pre-configured secondary image found. Using: {}'.format(
+                notebook_config['secondary_image_name'].get('name')))
+            notebook_config['secondary_image_name'] = 'global/images/{}'.format(
+                notebook_config['secondary_image_name'].get('name'))
 
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-    notebook_config['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
-        else notebook_config['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
-    print('Searching pre-configured images')
-    notebook_config['primary_image_name'] = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
-    if notebook_config['primary_image_name'] == '':
-        notebook_config['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    else:
-        print('Pre-configured primary image found. Using: {}'.format(notebook_config['primary_image_name'].get('name')))
-        notebook_config['primary_image_name'] = 'global/images/{}'.format(notebook_config['primary_image_name'].get('name'))
+        notebook_config['gpu_accelerator_type'] = 'None'
 
-    notebook_config['secondary_image_name'] = GCPMeta().get_image_by_name(notebook_config['expected_secondary_image_name'])
-    if notebook_config['secondary_image_name'] == '':
-        notebook_config['secondary_image_name'] = 'None'
-    else:
-        print('Pre-configured secondary image found. Using: {}'.format(notebook_config['secondary_image_name'].get('name')))
-        notebook_config['secondary_image_name'] = 'global/images/{}'.format(notebook_config['secondary_image_name'].get('name'))
+        if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
+            notebook_config['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
 
-    notebook_config['gpu_accelerator_type'] = 'None'
+        notebook_config['network_tag'] = '{0}-{1}-{2}-ps'.format(notebook_config['service_base_name'],
+                                                                 notebook_config['project_name'],
+                                                                 notebook_config['endpoint_name'])
 
-    if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
-        notebook_config['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
+        with open('/root/result.json', 'w') as f:
+            data = {"notebook_name": notebook_config['instance_name'], "error": ""}
+            json.dump(data, f)
 
-    notebook_config['network_tag'] = '{0}-{1}-ps'.format(notebook_config['service_base_name'],
-                                                         notebook_config['project_name'])
+        additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+            "'}", "").lower()
 
-    additional_tags = os.environ['tags'].replace("': u'", ": ").replace("', u'", ", ").replace("{u'", "" ).replace("'}", "")
-    print('Additional tags will be added: {}'.format(additional_tags))
+        print('Additional tags will be added: {}'.format(additional_tags))
+        notebook_config['labels'] = {"name": notebook_config['instance_name'],
+                                     "sbn": notebook_config['service_base_name'],
+                                     "product": "dlab"
+                                     }
 
-    notebook_config['labels'] = {"name": notebook_config['instance_name'],
-                                 "sbn": notebook_config['service_base_name'],
-                                 "project_tag": notebook_config['project_tag'],
-                                 "endpoint_tag": notebook_config['endpoint_tag'],
-                                 "user": notebook_config['edge_user_name'],
-                                 "product": "dlab",
-                                 }
+        for tag in additional_tags.split(','):
+            label_key = tag.split(':')[0]
+            label_value = tag.split(':')[1].replace('_', '-')
+            if '@' in label_value:
+                label_value = label_value[:label_value.find('@')]
+            if label_value != '':
+                notebook_config['labels'].update({label_key: label_value})
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
     # launching instance for notebook server
     try:
         logging.info('[CREATE NOTEBOOK INSTANCE]')
         print('[CREATE NOTEBOOK INSTANCE]')
         params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
                  "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
-                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
-                 "--gpu_accelerator_type {14} --network_tag {15} --labels '{16}'".\
+                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+                 "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --labels '{16}' " \
+                 "--service_base_name {17}".\
             format(notebook_config['instance_name'], notebook_config['region'], notebook_config['zone'],
                    notebook_config['vpc_name'], notebook_config['subnet_name'], notebook_config['instance_size'],
-                   notebook_config['ssh_key_path'], initial_user, notebook_config['notebook_service_account_name'],
-                   notebook_config['primary_image_name'], notebook_config['secondary_image_name'], 'notebook',
-                   notebook_config['primary_disk_size'], notebook_config['secondary_disk_size'],
-                   notebook_config['gpu_accelerator_type'], notebook_config['network_tag'],
-                   json.dumps(notebook_config['labels']))
+                   notebook_config['ssh_key_path'], notebook_config['initial_user'],
+                   notebook_config['notebook_service_account_name'], notebook_config['primary_image_name'],
+                   notebook_config['secondary_image_name'], 'notebook', notebook_config['primary_disk_size'],
+                   notebook_config['secondary_disk_size'], notebook_config['gpu_accelerator_type'],
+                   notebook_config['network_tag'], json.dumps(notebook_config['labels']),
+                   notebook_config['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        GCPActions().remove_disk(notebook_config['instance_name'], notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        GCPActions.remove_disk(notebook_config['instance_name'], notebook_config['zone'])
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
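common_prepare_notebook.py now derives project-scoped or endpoint-shared image names (depending on conf_shared_image_enabled) and falls back to the stock OS image when no pre-baked image exists. As the comparisons above imply, GCPMeta.get_image_by_name returns image metadata (a dict) on a hit and an empty string on a miss; the condensed sketch below mimics that contract with a stub (function and image names are illustrative):

    def resolve_image(expected_name, stock_image, get_image_by_name):
        # Prefer a pre-baked image; fall back to the stock OS image otherwise.
        image = get_image_by_name(expected_name)
        if image == '':
            return stock_image
        return 'global/images/{}'.format(image.get('name'))

    # First run: no pre-baked image exists yet, so the stock image is used.
    print(resolve_image('sbn-prj-ep-jupyter-primary-image',
                        'ubuntu-1604-lts', lambda name: ''))
    # Later runs: the lookup hits and the global image path is built instead.
    print(resolve_image('sbn-prj-ep-jupyter-primary-image', 'ubuntu-1604-lts',
                        lambda name: {'name': 'sbn-prj-ep-jupyter-primary-image'}))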
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
index b9c8a08..2d8fc8e 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 import argparse
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,9 +42,11 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['zone'] = os.environ['gcp_zone']
 
@@ -51,10 +55,10 @@
         print('[START NOTEBOOK]')
         try:
             print("Starting notebook")
-            GCPActions().start_instance(notebook_config['notebook_name'], notebook_config['zone'])
+            GCPActions.start_instance(notebook_config['notebook_name'], notebook_config['zone'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to start notebook.", str(err))
+            dlab.fab.append_result("Failed to start notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -62,7 +66,7 @@
     try:
         logging.info('[SETUP USER GIT CREDENTIALS]')
         print('[SETUP USER GIT CREDENTIALS]')
-        notebook_config['notebook_ip'] = GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+        notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
         notebook_config['keyfile'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
@@ -70,7 +74,7 @@
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to setup git credentials.", str(err))
+            dlab.fab.append_result("Failed to setup git credentials.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -84,7 +88,7 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -101,8 +105,6 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
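common_start_notebook.py, like the other lifecycle scripts, drives each step by formatting a flat "--key value" string and handing it to Fabric 1.x local(), which keeps every helper under ~/scripts/ independently runnable from a shell. The convention in isolation (user, IP, and key path are placeholder values):

    from fabric.api import local

    params = '--os_user {} --notebook_ip {} --keyfile "{}"'.format(
        'dlab-user', '10.0.1.5', '/root/keys/dlab.pem')
    # Equivalent to invoking the helper by hand from a shell:
    local("~/scripts/{}.py {}".format('manage_git_creds', params))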
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
index f336a0b..bcd431b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
@@ -24,9 +24,9 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
 import argparse
@@ -39,31 +39,31 @@
         labels = [
             {instance_name: '*'}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                computational_name = meta_lib.GCPMeta().get_cluster(cluster_name).get('labels').get(
+                computational_name = GCPMeta.get_cluster(cluster_name).get('labels').get(
                     'computational_name')
-                cluster = meta_lib.GCPMeta().get_list_cluster_statuses([cluster_name])
-                actions_lib.GCPActions().bucket_cleanup(bucket_name, project_name, cluster_name)
+                cluster = GCPMeta.get_list_cluster_statuses([cluster_name])
+                GCPActions.bucket_cleanup(bucket_name, project_name, cluster_name)
                 print('The bucket {} has been cleaned successfully'.format(bucket_name))
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
-                actions_lib.GCPActions().remove_kernels(instance_name, cluster_name, cluster[0]['version'], ssh_user,
-                                                        key_path, computational_name)
+                GCPActions.remove_kernels(instance_name, cluster_name, cluster[0]['version'], ssh_user,
+                                          key_path, computational_name)
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataproc", str(err))
         sys.exit(1)
 
     print("Stopping data engine cluster")
     try:
-        clusters_list = GCPMeta().get_list_instances_by_label(zone, instance_name)
+        clusters_list = GCPMeta.get_list_instances_by_label(zone, instance_name)
         if clusters_list.get('items'):
             for vm in clusters_list['items']:
                 try:
-                    GCPActions().stop_instance(vm['name'], zone)
+                    GCPActions.stop_instance(vm['name'], zone)
                     print("Instance {} has been stopped".format(vm['name']))
                 except:
                     pass
@@ -71,15 +71,14 @@
             print("There are no data engine clusters to terminate.")
 
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop dataengine cluster", str(err))
         sys.exit(1)
 
     print("Stopping notebook")
     try:
-        GCPActions().stop_instance(instance_name, zone)
+        GCPActions.stop_instance(instance_name, zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop instance", str(err))
         sys.exit(1)
 
 
@@ -92,12 +91,14 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+    notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+    notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                  notebook_config['project_name'],
@@ -115,7 +116,7 @@
                       notebook_config['project_name'])
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop notebook.", str(err))
         sys.exit(1)
 
     try:
@@ -124,7 +125,6 @@
                    "Action": "Stop notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
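The stop path above is deliberately asymmetric: dependent Dataproc clusters are deleted outright rather than stopped (after the bucket cleanup and kernel removal), while standalone Data Engine VMs and the notebook itself are merely stopped so their disks survive until a later terminate. In outline, omitting the cleanup steps (GCPMeta and GCPActions are the objects the script builds; signatures mirror the calls above):

    def stop_notebook(instance_name, zone, region, GCPMeta, GCPActions):
        # Dependent Dataproc clusters are removed, not paused.
        for cluster_name in GCPMeta.get_dataproc_list([{instance_name: '*'}]) or []:
            GCPActions.delete_dataproc_cluster(cluster_name, region)
        # Data Engine VMs and the notebook are only stopped.
        vms = GCPMeta.get_list_instances_by_label(zone, instance_name)
        for vm in vms.get('items', []):
            GCPActions.stop_instance(vm['name'], zone)
        GCPActions.stop_instance(instance_name, zone)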
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
index 4b243a0..00d39f5 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
@@ -24,9 +24,10 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 
@@ -37,26 +38,26 @@
         labels = [
             {instance_name: '*'}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                actions_lib.GCPActions().bucket_cleanup(bucket_name, user_name, cluster_name)
+                GCPActions.bucket_cleanup(bucket_name, user_name, cluster_name)
                 print('The bucket {} has been cleaned successfully'.format(bucket_name))
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataproc", str(err))
         sys.exit(1)
 
     print("Terminating data engine cluster")
     try:
-        clusters_list = GCPMeta().get_list_instances_by_label(zone, instance_name)
+        clusters_list = GCPMeta.get_list_instances_by_label(zone, instance_name)
         if clusters_list.get('items'):
             for vm in clusters_list['items']:
                 try:
-                    GCPActions().remove_instance(vm['name'], zone)
+                    GCPActions.remove_instance(vm['name'], zone)
                     print("Instance {} has been terminated".format(vm['name']))
                 except:
                     pass
@@ -64,15 +65,14 @@
             print("There are no data engine clusters to terminate.")
 
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine", str(err))
         sys.exit(1)
 
     print("Terminating notebook")
     try:
-        GCPActions().remove_instance(instance_name, zone)
+        GCPActions.remove_instance(instance_name, zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to terminate notebook.", str(err))
+        dlab.fab.append_result("Failed to terminate instance", str(err))
         sys.exit(1)
 
 
@@ -84,12 +84,14 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+    notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+    notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                  notebook_config['project_name'],
@@ -106,7 +108,7 @@
                          notebook_config['project_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate notebook.", str(err))
+            dlab.fab.append_result("Failed to terminate notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -117,6 +119,6 @@
                    "Action": "Terminate notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
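A behavioural fix repeated across all of these scripts: the final result-writing block used to swallow errors with a bare except and sys.exit(0), so a failed write still looked like success to the caller; it now reports through dlab.fab.append_result and exits 1. The corrected shape, with a stand-in reporter (the real helper lives in dlab.fab and its implementation is not shown in this patch):

    import json
    import sys

    def append_result(message, details=''):
        # Stand-in only: surface the failure somewhere the engine can read it.
        print(json.dumps({"error": message, "details": details}))

    try:
        with open('/root/result.json', 'w') as result:
            res = {"notebook_name": "demo-nb", "Action": "Terminate notebook server"}
            print(json.dumps(res))
            result.write(json.dumps(res))
    except Exception as err:
        append_result("Failed to write results", str(err))
        sys.exit(1)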
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
index 73d348a..05b9c9b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
@@ -24,10 +24,11 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
-from dlab.notebook_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.notebook_lib
+import traceback
 import sys
 import os
 import logging
@@ -35,7 +36,7 @@
 
 
 def configure_dataengine_service(instance, dataproc_conf):
-    dataproc_conf['instance_ip'] = meta_lib.GCPMeta().get_private_ip_address(instance)
+    dataproc_conf['instance_ip'] = GCPMeta.get_private_ip_address(instance)
     # configuring proxy on Data Engine service
     try:
         logging.info('[CONFIGURE PROXY ON DATAENGINE SERVICE]')
@@ -50,9 +51,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
 
     try:
@@ -62,16 +62,15 @@
             env['connection_attempts'] = 100
             env.key_filename = "{}".format(dataproc_conf['key_path'])
             env.host_string = dataproc_conf['dlab_ssh_user'] + '@' + dataproc_conf['instance_ip']
-            install_os_pkg(['python-pip', 'python3-pip'])
-            configure_data_engine_service_pip(dataproc_conf['instance_ip'], dataproc_conf['dlab_ssh_user'],
-                                              dataproc_conf['key_path'])
+            dlab.notebook_lib.install_os_pkg(['python-pip', 'python3-pip'])
+            dlab.fab.configure_data_engine_service_pip(dataproc_conf['instance_ip'], dataproc_conf['dlab_ssh_user'],
+                                                       dataproc_conf['key_path'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure dataengine service.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure dataengine service.", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
 
     try:
@@ -79,7 +78,7 @@
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         slaves = []
         for idx, instance in enumerate(dataproc_conf['cluster_core_instances']):
-            slave_ip = meta_lib.GCPMeta().get_private_ip_address(instance)
+            slave_ip = GCPMeta.get_private_ip_address(instance)
             slave = {
                 'name': 'datanode{}'.format(idx + 1),
                 'ip': slave_ip,
@@ -108,12 +107,11 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure reverse proxy.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure reverse proxy.", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
 
 
@@ -124,53 +122,66 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.INFO,
                         filename=local_log_filepath)
-    print('Generating infrastructure names and tags')
-    dataproc_conf = dict()
     try:
-        dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['exploratory_name'] = ''
-    try:
-        dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['computational_name'] = ''
-    dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    dataproc_conf['key_name'] = os.environ['conf_key_name']
-    dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    dataproc_conf['region'] = os.environ['gcp_region']
-    dataproc_conf['zone'] = os.environ['gcp_zone']
-    dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'],
-                                                      dataproc_conf['project_name'])
-    dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'],
-                                                                 dataproc_conf['project_name'],
-                                                                 dataproc_conf['exploratory_name'],
-                                                                 dataproc_conf['computational_name'])
-    dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
-                                                       dataproc_conf['project_name'])
-    dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        dataproc_conf = dict()
+        if 'exploratory_name' in os.environ:
+            dataproc_conf['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            dataproc_conf['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['computational_name'] = ''
+        dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+        dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        dataproc_conf['key_name'] = os.environ['conf_key_name']
+        dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        dataproc_conf['region'] = os.environ['gcp_region']
+        dataproc_conf['zone'] = os.environ['gcp_zone']
+        dataproc_conf['subnet'] = '{0}-{1}-{2}-subnet'.format(dataproc_conf['service_base_name'],
+                                                              dataproc_conf['project_name'],
+                                                              dataproc_conf['endpoint_name'])
+        dataproc_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}'.format(dataproc_conf['service_base_name'],
+                                                                     dataproc_conf['project_name'],
+                                                                     dataproc_conf['endpoint_name'],
+                                                                     dataproc_conf['computational_name'])
+        dataproc_conf['cluster_tag'] = '{0}-{1}-{2}-ps'.format(dataproc_conf['service_base_name'],
                                                                dataproc_conf['project_name'],
                                                                dataproc_conf['endpoint_name'])
-    dataproc_conf['release_label'] = os.environ['dataproc_version']
-    dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
-    dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
-                                                                         dataproc_conf['project_name'])
-    service_account_email = "{}@{}.iam.gserviceaccount.com".format(dataproc_conf['dataproc_service_account_name'],
-                                                                   os.environ['gcp_project_id'])
+        dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+                                                                   dataproc_conf['project_name'],
+                                                                   dataproc_conf['endpoint_name'])
+        dataproc_conf['release_label'] = os.environ['dataproc_version']
+        dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
+        dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-{2}-ps-sa'.format(dataproc_conf['service_base_name'],
+                                                                                    dataproc_conf['project_name'],
+                                                                                    dataproc_conf['endpoint_name'])
+        dataproc_conf['dataproc_unique_index'] = GCPMeta.get_index_by_service_account_name(
+            dataproc_conf['dataproc_service_account_name'])
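+        # GCP caps service-account IDs at 30 characters, so (presumably) the SA e-mail
+        # is derived from the base name plus this short unique index, not the full name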
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
+                                                                          dataproc_conf['dataproc_unique_index'],
+                                                                          os.environ['gcp_project_id'])
 
-    dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
-                                                                    dataproc_conf['project_name'],
-                                                                    dataproc_conf['endpoint_name'])
-    dataproc_conf['edge_instance_hostname'] = GCPMeta().get_instance_public_ip_by_name(
-        dataproc_conf['edge_instance_name'])
-    dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-    dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
-    dataproc_conf['master_ip'] = meta_lib.GCPMeta().get_private_ip_address(dataproc_conf['master_name'])
+        dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
+                                                                        dataproc_conf['project_name'],
+                                                                        dataproc_conf['endpoint_name'])
+        dataproc_conf['edge_instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(
+            dataproc_conf['edge_instance_name'])
+        dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
+        dataproc_conf['master_ip'] = GCPMeta.get_private_ip_address(dataproc_conf['master_name'])
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        if 'cluster_name' in dataproc_conf:  # guard: generation may fail before the name is set
+            GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        sys.exit(1)
 
     try:
-        res = meta_lib.GCPMeta().get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
+        res = GCPMeta.get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
         dataproc_conf['cluster_instances'] = [i.get('name') for i in res['items']]
     except Exception as err:
         traceback.print_exc()
@@ -192,12 +203,14 @@
         for job in jobs:
             if job.exitcode != 0:
                 raise Exception
-    except:
+    except Exception as err:
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure Dataengine-service", str(err))
         traceback.print_exc()
         raise Exception
 
     try:
-        dataproc_master_acces_url = "http://" + dataproc_conf['edge_instance_hostname'] + "/{}/".format(
+        dataproc_master_access_url = "https://" + dataproc_conf['edge_instance_hostname'] + "/{}/".format(
             dataproc_conf['exploratory_name'] + '_' + dataproc_conf['computational_name'])
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
@@ -223,11 +236,12 @@
                    "Action": "Create new Dataproc cluster",
                    "computational_url": [
                        {"description": "Dataproc Master",
-                        "url": dataproc_master_acces_url}
+                        "url": dataproc_master_access_url}
                    ]
                    }
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
index 004a1c9..7b9d05a 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
@@ -57,7 +57,7 @@
     job_body['job']['placement']['clusterName'] = cluster_name
     job_body['job']['pysparkJob']['mainPythonFileUri'] = 'gs://{}/jars_parser.py'.format(args.bucket)
     job_body['job']['pysparkJob']['args'][1] = args.bucket
-    job_body['job']['pysparkJob']['args'][3] = (os.environ['project_name']).lower().replace('_', '-')
+    job_body['job']['pysparkJob']['args'][3] = (os.environ['project_name']).replace('_', '-').lower()
     job_body['job']['pysparkJob']['args'][5] = cluster_name
     job_body['job']['pysparkJob']['args'][7] = cluster_version
     job_body['job']['pysparkJob']['args'][9] = os.environ['conf_os_user']
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_jars_parser.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_jars_parser.py
index 4fd5a4a..c07e515 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_jars_parser.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_jars_parser.py
@@ -44,18 +44,21 @@
         outfile.write(r_ver)
 
     os.system('touch /tmp/python_version')
-    python_ver = subprocess.check_output("python3.5 -V 2>/dev/null | awk '{print $2}'", shell=True).decode('UTF-8')
-    if python_ver != '':
-        with open('/tmp/python_version', 'w') as outfile:
-            outfile.write(python_ver)
-    else:
-        python_ver = subprocess.check_output("python3.4 -V 2>/dev/null | awk '{print $2}'", shell=True).decode('UTF-8')
-        with open('/tmp/python_version', 'w') as outfile:
-            outfile.write(python_ver)
+    for v in range(4, 7):
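+        # probe python3.4..python3.6; with no break, a newer version found later
+        # overwrites /tmp/python_version, so the highest installed 3.x wins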
+        python_ver_checker = "python3.{} -V 2>/dev/null".format(v) + " | awk '{print $2}'"
+        python_ver = subprocess.check_output(python_ver_checker, shell=True).decode('UTF-8')
+        if python_ver != '':
+            with open('/tmp/python_version', 'w') as outfile:
+                outfile.write(python_ver)
     os.system('touch /tmp/spark_version')
     spark_ver = subprocess.check_output("dpkg -l | grep spark-core | tr -s ' ' '-' | cut -f 4 -d '-'", shell=True).decode('UTF-8')
     with open('/tmp/spark_version', 'w') as outfile:
         outfile.write(spark_ver)
+    os.system('touch /tmp/scala_version')
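+    # PCRE \K discards the 'Scala version ' prefix from the match, so grep emits only
+    # the version string itself (up to 7 characters, e.g. '2.11.12')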
+    scala_ver = subprocess.check_output("spark-submit --version 2>&1 | grep -o -P 'Scala version \K.{0,7}'",
+                                        shell=True).decode('UTF-8')
+    with open('/tmp/scala_version', 'w') as outfile:
+        outfile.write(scala_ver)
     os.system('touch /tmp/hadoop_version')
     hadoop_ver = subprocess.check_output("dpkg -l | grep hadoop | head -n 1 | tr -s ' ' '-' | cut -f 3 -d '-'", shell=True).decode('UTF-8')
     with open('/tmp/hadoop_version', 'w') as outfile:
@@ -79,6 +82,7 @@
     os.system('gsutil -m cp {0} gs://{1}/{2}/{3}/'.format(spark_def_path, args.bucket, args.user_name, args.cluster_name))
     os.system('gsutil -m cp /tmp/python_version gs://{0}/{1}/{2}/'.format(args.bucket, args.user_name, args.cluster_name))
     os.system('gsutil -m cp /tmp/spark_version gs://{0}/{1}/{2}/'.format(args.bucket, args.user_name, args.cluster_name))
+    os.system('gsutil -m cp /tmp/scala_version gs://{0}/{1}/{2}/'.format(args.bucket, args.user_name, args.cluster_name))
     os.system('gsutil -m cp /tmp/r_version gs://{0}/{1}/{2}/'.format(args.bucket, args.user_name, args.cluster_name))
     os.system('gsutil -m cp /tmp/hadoop_version gs://{0}/{1}/{2}/'.format(args.bucket, args.user_name, args.cluster_name))
     os.system('gsutil -m cp /tmp/spark.tar.gz gs://{0}/{1}/{2}/'.format(args.bucket, args.user_name, args.cluster_name))
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
index d6bb17a..993b8e7 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -42,73 +43,101 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
-        os.environ['exploratory_name']
-    except:
-        os.environ['exploratory_name'] = ''
-    if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])):
-        time.sleep(30)
-
-    print('Generating infrastructure names and tags')
-    dataproc_conf = dict()
-    try:
-        dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['exploratory_name'] = ''
-    try:
-        dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['computational_name'] = ''
-    dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    dataproc_conf['key_name'] = os.environ['conf_key_name']
-    dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    dataproc_conf['region'] = os.environ['gcp_region']
-    dataproc_conf['zone'] = os.environ['gcp_zone']
-    dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'])
-    dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'],
-                                                                 dataproc_conf['exploratory_name'], dataproc_conf['computational_name'])
-    dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'])
-    dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        dataproc_conf = dict()
+        if 'exploratory_name' in os.environ:
+            dataproc_conf['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            dataproc_conf['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['computational_name'] = ''
+        if os.path.exists('/response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name'])):
+            time.sleep(30)
+        dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+        dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        dataproc_conf['project_tag'] = dataproc_conf['project_name']
+        dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        dataproc_conf['endpoint_tag'] = dataproc_conf['endpoint_name']
+        dataproc_conf['key_name'] = os.environ['conf_key_name']
+        dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        dataproc_conf['region'] = os.environ['gcp_region']
+        dataproc_conf['zone'] = os.environ['gcp_zone']
+        dataproc_conf['subnet'] = '{0}-{1}-{2}-subnet'.format(dataproc_conf['service_base_name'],
+                                                              dataproc_conf['project_name'],
+                                                              dataproc_conf['endpoint_name'])
+        dataproc_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}'.format(dataproc_conf['service_base_name'],
+                                                                     dataproc_conf['project_name'],
+                                                                     dataproc_conf['endpoint_name'],
+                                                                     dataproc_conf['computational_name'])
+        dataproc_conf['cluster_tag'] = '{0}-{1}-{2}-ps'.format(dataproc_conf['service_base_name'],
                                                                dataproc_conf['project_name'],
                                                                dataproc_conf['endpoint_name'])
-    dataproc_conf['release_label'] = os.environ['dataproc_version']
-    dataproc_conf['cluster_labels'] = {
-        os.environ['notebook_instance_name']: "not-configured",
-        "name": dataproc_conf['cluster_name'],
-        "sbn": dataproc_conf['service_base_name'],
-        "user": dataproc_conf['edge_user_name'],
-        "project_tag": dataproc_conf['project_tag'],
-        "endpoint_tag": dataproc_conf['endpoint_tag'],
-        "notebook_name": os.environ['notebook_instance_name'],
-        "product": "dlab",
-        "computational_name": dataproc_conf['computational_name']
-    }
-    dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
-                                                                         dataproc_conf['project_name'])
-    service_account_email = "{}@{}.iam.gserviceaccount.com".format(dataproc_conf['dataproc_service_account_name'],
-                                                                   os.environ['gcp_project_id'])
-    dataproc_conf['edge_instance_hostname'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
-                                                                        dataproc_conf['project_name'],
-                                                                        dataproc_conf['endpoint_name'])
-    dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+                                                                   dataproc_conf['project_name'],
+                                                                   dataproc_conf['endpoint_name'])
+        dataproc_conf['release_label'] = os.environ['dataproc_version']
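+        # os.environ['tags'] arrives as a stringified Python 2 dict (e.g. "{u'k': u'v'}");
+        # the replace() chain flattens that repr into plain "k:v,k:v" pairs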
+        additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+            "'}", "").lower()
 
-    edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname'])
+        dataproc_conf['cluster_labels'] = {
+            os.environ['notebook_instance_name']: "not-configured",
+            "name": dataproc_conf['cluster_name'],
+            "sbn": dataproc_conf['service_base_name'],
+            "notebook_name": os.environ['notebook_instance_name'],
+            "product": "dlab",
+            "computational_name": dataproc_conf['computational_name']
+        }
+
+        for tag in additional_tags.split(','):
+            label_key = tag.split(':')[0]
+            label_value = tag.split(':')[1].replace('_', '-')
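+            # GCP label values allow only lowercase letters, digits, '-' and '_',
+            # so e-mail style values are trimmed to the part before '@'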
+            if '@' in label_value:
+                label_value = label_value[:label_value.find('@')]
+            if label_value != '':
+                dataproc_conf['cluster_labels'].update({label_key: label_value})
+        dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-{2}-ps-sa'.format(dataproc_conf['service_base_name'],
+                                                                                    dataproc_conf['project_name'],
+                                                                                    dataproc_conf['endpoint_name'])
+        dataproc_conf['dataproc_unique_index'] = GCPMeta.get_index_by_service_account_name(
+            dataproc_conf['dataproc_service_account_name'])
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
+                                                                          dataproc_conf['dataproc_unique_index'],
+                                                                          os.environ['gcp_project_id'])
+        dataproc_conf['edge_instance_hostname'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
+                                                                            dataproc_conf['project_name'],
+                                                                            dataproc_conf['endpoint_name'])
+        dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary. Exception:" + str(err))
+        sys.exit(1)
+
+    edge_status = GCPMeta.get_instance_status(dataproc_conf['edge_instance_hostname'])
     if edge_status != 'RUNNING':
         logging.info('ERROR: Edge node is unavailable! Aborting...')
         print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
-        append_result("Edge node is unavailable")
+        ssn_hostname = GCPMeta.get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
+        dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                     ssn_hostname)
+        dlab.fab.append_result("Edge node is unavailable")
         sys.exit(1)
 
-    print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+    print("Will create exploratory environment with edge node as access point as following: ".format(
+        json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
     logging.info(json.dumps(dataproc_conf))
 
-    local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
+    try:
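+        # assumption: dataproc_waiter blocks while another cluster with these labels is still provisioning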
+        GCPMeta.dataproc_waiter(dataproc_conf['cluster_labels'])
+        local('touch /response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name']))
+    except Exception as err:
+        traceback.print_exc()
+        dlab.fab.append_result("Dataproc waiter fail.", str(err))
+        sys.exit(1)
+
     local("echo Waiting for changes to propagate; sleep 10")
 
     dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig'))
@@ -124,20 +153,27 @@
     dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count'])
     dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count'])
     if int(os.environ['dataproc_preemptible_count']) != 0:
-        dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count'])
+        dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(
+            os.environ['dataproc_preemptible_count'])
     else:
         del dataproc_cluster['config']['secondaryWorkerConfig']
     dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label']
-    ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['project_name'] + '.pub').read()
+    ssh_user_pubkey = open('{}{}.pub'.format(os.environ['conf_key_dir'], dataproc_conf['project_name'])).read()
     key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read())
     ssh_admin_pubkey = key.publickey().exportKey("OpenSSH")
-    dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
+    dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(
+        dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
     dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag']
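+    # pre-seed /root/result.json so the caller always finds a result file even if
+    # cluster creation fails before the summary is written (assumed consumer behaviour)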
+    with open('/root/result.json', 'w') as f:
+        data = {"hostname": dataproc_conf['cluster_name'], "error": ""}
+        json.dump(data, f)
 
     try:
         logging.info('[Creating Dataproc Cluster]')
         print('[Creating Dataproc Cluster]')
-        params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster))
+        params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'],
+                                                                   dataproc_conf['bucket_name'],
+                                                                   json.dumps(dataproc_cluster))
 
         try:
             local("~/scripts/{}.py {}".format('dataengine-service_create', params))
@@ -148,7 +184,6 @@
         keyfile_name = "/root/keys/{}.pem".format(dataproc_conf['key_name'])
-        local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
+        local('rm /response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name']))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create Dataproc Cluster.", str(err))
+        dlab.fab.append_result("Failed to create Dataproc Cluster.", str(err))
-        local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
+        local('rm /response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name']))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
index 4247234..3710b1c 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
@@ -21,31 +21,34 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
 import boto3
 import argparse
 import sys
+import json
 import os
 
 
 def terminate_dataproc_cluster(notebook_name, dataproc_name, bucket_name, ssh_user, key_path):
     print('Terminating Dataproc cluster and cleaning Dataproc config from bucket')
     try:
-        cluster = meta_lib.GCPMeta().get_list_cluster_statuses([dataproc_name])
+        cluster = GCPMeta.get_list_cluster_statuses([dataproc_name])
         if cluster[0]['status'] == 'running':
-            computational_name = meta_lib.GCPMeta().get_cluster(dataproc_name).get('labels').get('computational_name')
-            actions_lib.GCPActions().bucket_cleanup(bucket_name, os.environ['project_name'], dataproc_name)
+            computational_name = GCPMeta.get_cluster(dataproc_name).get('labels').get('computational_name')
+            GCPActions.bucket_cleanup(bucket_name, dataproc_conf['project_name'], dataproc_name)
             print('The bucket {} has been cleaned successfully'.format(bucket_name))
-            actions_lib.GCPActions().delete_dataproc_cluster(dataproc_name, os.environ['gcp_region'])
+            GCPActions.delete_dataproc_cluster(dataproc_name, os.environ['gcp_region'])
             print('The Dataproc cluster {} has been terminated successfully'.format(dataproc_name))
-            actions_lib.GCPActions().remove_kernels(notebook_name, dataproc_name, cluster[0]['version'], ssh_user,
-                                                    key_path, computational_name)
+            GCPActions.remove_kernels(notebook_name, dataproc_name, cluster[0]['version'], ssh_user,
+                                      key_path, computational_name)
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate Dataproc cluster.", str(err))
         sys.exit(1)
 
 
@@ -58,12 +61,14 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     dataproc_conf = dict()
     dataproc_conf['service_base_name'] = os.environ['conf_service_base_name']
-    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+    dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     dataproc_conf['dataproc_name'] = os.environ['dataproc_cluster_name']
     dataproc_conf['gcp_project_id'] = os.environ['gcp_project_id']
     dataproc_conf['gcp_region'] = os.environ['gcp_region']
@@ -79,13 +84,13 @@
         print('[TERMINATE DATAPROC CLUSTER]')
         try:
             terminate_dataproc_cluster(dataproc_conf['notebook_name'], dataproc_conf['dataproc_name'],
-                                       dataproc_conf['bucket_name'], os.environ['conf_os_user'], dataproc_conf['key_path'])
+                                       dataproc_conf['bucket_name'], os.environ['conf_os_user'],
+                                       dataproc_conf['key_path'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Dataproc cluster.", str(err))
+            dlab.fab.append_result("Failed to terminate Dataproc cluster.", str(err))
             raise Exception
-    except Exception as err:
-        print('Error: {0}'.format(err))
+    except:
         sys.exit(1)
 
     try:
@@ -96,6 +101,6 @@
                    "Action": "Terminate Dataproc cluster"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
index f396bdd..d50e0f0 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -37,7 +38,7 @@
 
 def configure_slave(slave_number, data_engine):
     slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
-    slave_hostname = GCPMeta().get_private_ip_address(slave_name)
+    slave_hostname = GCPMeta.get_private_ip_address(slave_name)
     try:
         logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
         print('[CREATING DLAB SSH USER ON SLAVE NODE]')
@@ -51,18 +52,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to create ssh user on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON SLAVE NODE]')
         logging.info('[INSTALLING USERs KEY ON SLAVE NODE]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
             slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
@@ -70,15 +67,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install ssh user key on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install ssh user key on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -94,12 +87,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to configure proxy on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -114,13 +103,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install prerequisites on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -138,16 +122,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed configuring slave node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to configure slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure slave node.", str(err))
         sys.exit(1)
 
 
+def clear_resources():
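+    # best-effort rollback of a partially created cluster: remove every slave, then the
+    # master; relies on the module-level data_engine dict and GCPActions set in __main__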
+    for i in range(data_engine['instance_count'] - 1):
+        slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+        GCPActions.remove_instance(slave_name, data_engine['zone'])
+    GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
+
+
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                                os.environ['request_id'])
@@ -157,12 +143,15 @@
                         filename=local_log_filepath)
 
     try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
         print('Generating infrastructure names and tags')
         data_engine = dict()
-        data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-        data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-        data_engine['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-        data_engine['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
+        data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+        data_engine['edge_user_name'] = (os.environ['edge_user_name'])
+        data_engine['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
         data_engine['region'] = os.environ['gcp_region']
         data_engine['zone'] = os.environ['gcp_zone']
         try:
@@ -171,24 +160,26 @@
             else:
                 data_engine['vpc_name'] = os.environ['gcp_vpc_name']
         except KeyError:
-            data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
-        try:
-            data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-        except:
+            data_engine['vpc_name'] = '{}-vpc'.format(data_engine['service_base_name'])
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
             data_engine['exploratory_name'] = ''
-        try:
-            data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
             data_engine['computational_name'] = ''
 
-        data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
-                                                             data_engine['project_name'])
+        data_engine['subnet_name'] = '{0}-{1}-{2}-subnet'.format(data_engine['service_base_name'],
+                                                                 data_engine['project_name'],
+                                                                 data_engine['endpoint_name'])
         data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
         data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], data_engine['key_name'])
-        data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
-                                                                           data_engine['project_name'])
+        data_engine['dataengine_service_account_name'] = '{}-{}-{}-ps-sa'.format(data_engine['service_base_name'],
+                                                                                 data_engine['project_name'],
+                                                                                 data_engine['endpoint_name'])
 
         if os.environ['conf_os_family'] == 'debian':
             initial_user = 'ubuntu'
@@ -196,9 +187,10 @@
         if os.environ['conf_os_family'] == 'redhat':
             initial_user = 'ec2-user'
             sudo_group = 'wheel'
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
         data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
@@ -206,23 +198,19 @@
         data_engine['gpu_accelerator_type'] = 'None'
         if os.environ['application'] in ('tensor', 'deeplearning'):
             data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
-        data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
-                                                         data_engine['project_name'])
-        master_node_hostname = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
+        data_engine['network_tag'] = '{0}-{1}-{2}-ps'.format(data_engine['service_base_name'],
+                                                             data_engine['project_name'],
+                                                             data_engine['endpoint_name'])
+        master_node_hostname = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
                                                        data_engine['project_name'], data_engine['endpoint_tag'])
-        edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-        edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
         data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
@@ -238,33 +226,26 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to create ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on master.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON MASTER NODE]')
         logging.info('[INSTALLING USERs KEY ON MASTER NODE]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
+            master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem",
+            json.dumps(additional_config), data_engine['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install ssh user on master.", str(err))
         sys.exit(1)
 
     try:
@@ -280,12 +261,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to configure proxy on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on master.", str(err))
         sys.exit(1)
 
     try:
@@ -300,13 +277,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install prerequisites on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
         sys.exit(1)
 
     try:
@@ -324,12 +296,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure master node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to configure master node", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -344,17 +312,14 @@
             if job.exitcode != 0:
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to configure slave nodes", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
-        notebook_instance_ip = GCPMeta().get_private_ip_address(data_engine['notebook_name'])
+        notebook_instance_ip = GCPMeta.get_private_ip_address(data_engine['notebook_name'])
         additional_info = {
             "computational_name": data_engine['computational_name'],
             "master_node_hostname": master_node_hostname,
@@ -379,20 +344,17 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to configure reverse proxy", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        ip_address = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
+        ip_address = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
         spark_master_url = "http://" + ip_address + ":8080"
-        spark_master_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+        spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
@@ -409,13 +371,14 @@
                    "Action": "Create new Data Engine",
                    "computational_url": [
                        {"description": "Apache Spark Master",
-                        "url": spark_master_acces_url},
+                        "url": spark_master_access_url},
                        # {"description": "Apache Spark Master (via tunnel)",
                        # "url": spark_master_url}
                    ]
                    }
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
index d0cf7ea..262868c 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import argparse
+from fabric.api import *
 
 if __name__ == "__main__":
     instance_class = 'notebook'
@@ -38,147 +40,167 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    print('Generating infrastructure names and tags')
-    data_engine = dict()
-    data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    data_engine['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    data_engine['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    data_engine['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    data_engine['region'] = os.environ['gcp_region']
-    data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['endpoint_name'] = os.environ['endpoint_name']
-
-    edge_status = GCPMeta().get_instance_status('{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
-                                                                          data_engine['project_name'],
-                                                                          data_engine['endpoint_name']))
-    if edge_status != 'RUNNING':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = GCPMeta().get_private_ip_address(data_engine['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                            ssn_hostname)
-        append_result("Edge node is unavailable")
-        sys.exit(1)
-
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        data_engine = dict()
+        data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+        data_engine['edge_user_name'] = (os.environ['edge_user_name'])
+        data_engine['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        data_engine['project_tag'] = data_engine['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
+        data_engine['region'] = os.environ['gcp_region']
+        data_engine['zone'] = os.environ['gcp_zone']
+
+        edge_status = GCPMeta.get_instance_status('{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
+                                                                            data_engine['project_name'],
+                                                                            data_engine['endpoint_name']))
+        if edge_status != 'RUNNING':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            ssn_hostname = GCPMeta.get_private_ip_address(data_engine['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                data_engine['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            data_engine['vpc_name'] = '{}-vpc'.format(data_engine['service_base_name'])
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
         else:
-            data_engine['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
-    try:
-        data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
-        data_engine['computational_name'] = ''
+            data_engine['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
+            data_engine['computational_name'] = ''
 
-    data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
-                                                         data_engine['project_name'])
-    data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
-    data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
-    data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
-                                                                       data_engine['project_name'])
+        data_engine['subnet_name'] = '{0}-{1}-{2}-subnet'.format(data_engine['service_base_name'],
+                                                                 data_engine['project_name'],
+                                                                 data_engine['endpoint_name'])
+        data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
+        data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
+        data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        data_engine['dataengine_service_account_name'] = '{}-{}-{}-ps-sa'.format(data_engine['service_base_name'],
+                                                                                 data_engine['project_name'],
+                                                                                 data_engine['endpoint_name'])
 
-    if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
-    if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
-    data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['project_name'] + \
-                                  '-de-' + data_engine['exploratory_name'] + '-' + \
-                                  data_engine['computational_name']
-    data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
-    data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
-    data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
-    data_engine['notebook_name'] = os.environ['notebook_instance_name']
+        if os.environ['conf_os_family'] == 'debian':
+            initial_user = 'ubuntu'
+            sudo_group = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            initial_user = 'ec2-user'
+            sudo_group = 'wheel'
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
+        data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
+        data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
+        data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
+        data_engine['notebook_name'] = os.environ['notebook_instance_name']
 
-    data_engine['primary_disk_size'] = '30'
-    data_engine['secondary_disk_size'] = os.environ['notebook_disk_size']
+        data_engine['primary_disk_size'] = '30'
+        data_engine['secondary_disk_size'] = os.environ['notebook_disk_size']
 
-    data_engine['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if data_engine['shared_image_enabled'] == 'false':
-        data_engine['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], data_engine['project_name'],
-            os.environ['application'])
-        data_engine['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], data_engine['project_name'],
-            os.environ['application'])
-    else:
-        data_engine['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
-        data_engine['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
-    data_engine['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
-    else data_engine['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
-    print('Searching pre-configured images')
-    data_engine['primary_image_name'] = GCPMeta().get_image_by_name(data_engine['notebook_primary_image_name'])
-    if data_engine['primary_image_name'] == '':
-        data_engine['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    else:
-        print('Pre-configured primary image found. Using: {}'.format(data_engine['primary_image_name'].get('name')))
-        data_engine['primary_image_name'] = 'global/images/{}'.format(
-            data_engine['primary_image_name'].get('name'))
+        data_engine['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if data_engine['shared_image_enabled'] == 'false':
+            data_engine['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_tag'],
+                os.environ['application'])
+            data_engine['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_tag'],
+                os.environ['application'])
+        else:
+            data_engine['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
+            data_engine['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
+        data_engine['notebook_primary_image_name'] = (
+            os.environ['notebook_primary_image_name']
+            if str(os.environ.get('notebook_primary_image_name')) != 'None'
+            else data_engine['expected_primary_image_name'])
+        print('Searching pre-configured images')
+        data_engine['primary_image_name'] = GCPMeta.get_image_by_name(data_engine['notebook_primary_image_name'])
+        if data_engine['primary_image_name'] == '':
+            data_engine['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        else:
+            print('Pre-configured primary image found. Using: {}'.format(data_engine['primary_image_name'].get('name')))
+            data_engine['primary_image_name'] = 'global/images/{}'.format(
+                data_engine['primary_image_name'].get('name'))
 
-    data_engine['secondary_image_name'] = GCPMeta().get_image_by_name(data_engine['expected_secondary_image_name'])
-    if data_engine['secondary_image_name'] == '':
-        data_engine['secondary_image_name'] = 'None'
-    else:
-        print('Pre-configured secondary image found. Using: {}'.format(data_engine['secondary_image_name'].get('name')))
-        data_engine['secondary_image_name'] = 'global/images/{}'.format(data_engine['secondary_image_name'].get('name'))
+        data_engine['secondary_image_name'] = GCPMeta.get_image_by_name(data_engine['expected_secondary_image_name'])
+        if data_engine['secondary_image_name'] == '':
+            data_engine['secondary_image_name'] = 'None'
+        else:
+            print('Pre-configured secondary image found. Using: {}'.format(
+                data_engine['secondary_image_name'].get('name')))
+            data_engine['secondary_image_name'] = 'global/images/{}'.format(
+                data_engine['secondary_image_name'].get('name'))
 
-    data_engine['gpu_accelerator_type'] = 'None'
-    if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
-        data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
-    data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
-                                                     data_engine['project_name'])
-    data_engine['slave_labels'] = {"name": data_engine['cluster_name'],
-                                   "sbn": data_engine['service_base_name'],
-                                   "user": data_engine['edge_user_name'],
-                                   "project_tag": data_engine['project_tag'],
-                                   "endpoint_tag": data_engine['endpoint_tag'],
-                                   "type": "slave",
-                                   "notebook_name": data_engine['notebook_name'],
-                                   "product": "dlab"}
-    data_engine['master_labels'] = {"name": data_engine['cluster_name'],
-                                    "sbn": data_engine['service_base_name'],
-                                    "user": data_engine['edge_user_name'],
-                                    "project_tag": data_engine['project_tag'],
-                                    "endpoint_tag": data_engine['endpoint_tag'],
-                                    "type": "master",
-                                    "notebook_name": data_engine['notebook_name'],
-                                    "product": "dlab"}
+        with open('/root/result.json', 'w') as f:
+            data = {"hostname": data_engine['cluster_name'], "error": ""}
+            json.dump(data, f)
+
+        data_engine['gpu_accelerator_type'] = 'None'
+        if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
+            data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
+        data_engine['network_tag'] = '{0}-{1}-{2}-ps'.format(data_engine['service_base_name'],
+                                                             data_engine['project_name'], data_engine['endpoint_name'])
+        additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+            "'}", "").lower()
+
+        data_engine['slave_labels'] = {"name": data_engine['cluster_name'],
+                                       "sbn": data_engine['service_base_name'],
+                                       "type": "slave",
+                                       "notebook_name": data_engine['notebook_name'],
+                                       "product": "dlab"}
+        data_engine['master_labels'] = {"name": data_engine['cluster_name'],
+                                        "sbn": data_engine['service_base_name'],
+                                        "type": "master",
+                                        "notebook_name": data_engine['notebook_name'],
+                                        "product": "dlab"}
+
+        for tag in additional_tags.split(','):
+            label_key = tag.split(':')[0]
+            label_value = tag.split(':')[1].replace('_', '-')
+            if '@' in label_value:
+                label_value = label_value[:label_value.find('@')]
+            if label_value != '':
+                data_engine['slave_labels'].update({label_key: label_value})
+                data_engine['master_labels'].update({label_key: label_value})
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary. Exception:" + str(err))
+        sys.exit(1)
 
     try:
         logging.info('[CREATE MASTER NODE]')
         print('[CREATE MASTER NODE]')
         params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
                  "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
-                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13}  " \
-                 "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}'". \
+                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+                 "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} " \
+                 "--labels '{17}' --service_base_name {18}". \
             format(data_engine['master_node_name'], data_engine['region'], data_engine['zone'], data_engine['vpc_name'],
                    data_engine['subnet_name'], data_engine['master_size'], data_engine['ssh_key_path'], initial_user,
                    data_engine['dataengine_service_account_name'], data_engine['primary_image_name'],
                    data_engine['secondary_image_name'], 'dataengine', data_engine['primary_disk_size'],
                    data_engine['secondary_disk_size'], data_engine['gpu_accelerator_type'],
                    data_engine['network_tag'], data_engine['cluster_name'],
-                   json.dumps(data_engine['master_labels']))
+                   json.dumps(data_engine['master_labels']), data_engine['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
         sys.exit(1)
 
     try:
@@ -186,10 +208,11 @@
             logging.info('[CREATE SLAVE NODE {}]'.format(i + 1))
             print('[CREATE SLAVE NODE {}]'.format(i + 1))
             slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
-                     "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
-                     "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
-                     "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}'". \
+            params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} " \
+                     "--instance_size {5} --ssh_key_path {6} --initial_user {7} --service_account_name {8} " \
+                     "--image_name {9} --secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+                     "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} " \
+                     "--labels '{17}' --service_base_name {18}". \
                 format(slave_name, data_engine['region'], data_engine['zone'],
                        data_engine['vpc_name'], data_engine['subnet_name'], data_engine['slave_size'],
                        data_engine['ssh_key_path'], initial_user, data_engine['dataengine_service_account_name'],
@@ -197,20 +220,19 @@
                        data_engine['primary_disk_size'],
                        data_engine['secondary_disk_size'], data_engine['gpu_accelerator_type'],
                        data_engine['network_tag'], data_engine['cluster_name'],
-                       json.dumps(data_engine['slave_labels']))
+                       json.dumps(data_engine['slave_labels']), data_engine['service_base_name'])
             try:
                 local("~/scripts/{}.py {}".format('common_create_instance', params))
             except:
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         for i in range(data_engine['instance_count'] - 1):
             slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
             try:
-                GCPActions().remove_instance(slave_name, data_engine['zone'])
+                GCPActions.remove_instance(slave_name, data_engine['zone'])
             except:
                 print("The slave instance {} hasn't been created.".format(slave_name))
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to create slave instances.", str(err))
+        GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to create slave instances.", str(err))
         sys.exit(1)
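
The new label block in dataengine_prepare.py parses os.environ['tags'], which arrives as a stringified Python 2 dict (for example {u'user_tag': u'name@example.com'}), by chained replace() calls before splitting on ',' and ':'. A small sketch of that transformation, assuming the input always has exactly this shape (the sample value below is hypothetical; the real one comes from the provisioning request):

    # Hypothetical sample of the stringified dict the script receives.
    raw = "{u'user_tag': u'Name@example.com', u'custom_tag': u'my_value'}"

    additional_tags = raw.replace("': u'", ":").replace("', u'", ",").replace(
        "{u'", "").replace("'}", "").lower()
    # -> "user_tag:name@example.com,custom_tag:my_value"

    labels = {"type": "slave", "product": "dlab"}
    for tag in additional_tags.split(','):
        label_key = tag.split(':')[0]
        label_value = tag.split(':')[1].replace('_', '-')
        # GCP label values may not contain '@', so e-mail addresses are
        # truncated at the first '@'.
        if '@' in label_value:
            label_value = label_value[:label_value.find('@')]
        if label_value != '':
            labels[label_key] = label_value

    print(labels)
    # {'type': 'slave', 'product': 'dlab', 'user_tag': 'name', 'custom_tag': 'my-value'}
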
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
index 0e40ed9..ce5af48 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
@@ -24,22 +24,24 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
 
 
 def start_data_engine(zone, cluster_name):
     print("Starting data engine cluster")
     try:
-        instances = GCPMeta().get_list_instances(zone, cluster_name)
+        instances = GCPMeta.get_list_instances(zone, cluster_name)
         if 'items' in instances:
             for i in instances['items']:
-                GCPActions().start_instance(i['name'], zone)
+                GCPActions.start_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to start dataengine", str(err))
         sys.exit(1)
 
 
@@ -51,23 +53,27 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+    data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     try:
         logging.info('[STARTING DATA ENGINE]')
         print('[STARTING DATA ENGINE]')
@@ -75,7 +81,7 @@
             start_data_engine(data_engine['zone'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to start Data Engine.", str(err))
+            dlab.fab.append_result("Failed to start Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -84,9 +90,9 @@
         logging.info('[UPDATE LAST ACTIVITY TIME]')
         print('[UPDATE LAST ACTIVITY TIME]')
         data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
-        data_engine['notebook_ip'] = GCPMeta().get_private_ip_address(os.environ['notebook_instance_name'])
-        data_engine['computational_ip'] = GCPMeta().get_private_ip_address(data_engine['computational_id'])
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+        data_engine['notebook_ip'] = GCPMeta.get_private_ip_address(os.environ['notebook_instance_name'])
+        data_engine['computational_ip'] = GCPMeta.get_private_ip_address(data_engine['computational_id'])
         data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
             .format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -95,18 +101,17 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
 
-
     try:
         with open("/root/result.json", 'w') as result:
             res = {"service_base_name": data_engine['service_base_name'],
                    "Action": "Start Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
\ No newline at end of file
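
dataengine_start.py now builds the cluster name as <service_base_name>-<project>-<endpoint>-de-<computational_name> and starts every instance the metadata query returns for it. A sketch of that flow with the GCP calls stubbed out; GCPMeta and GCPActions here are hypothetical minimal stand-ins, not the real dlab classes:

    class GCPMeta(object):
        # Hypothetical stub: the real class queries the Compute Engine API.
        def get_list_instances(self, zone, cluster_name):
            return {"items": [{"name": cluster_name + "-m"},
                              {"name": cluster_name + "-s1"}]}


    class GCPActions(object):
        def start_instance(self, name, zone):
            print("starting {} in {}".format(name, zone))


    def start_data_engine(meta, actions, zone, cluster_name):
        instances = meta.get_list_instances(zone, cluster_name)
        # The API omits 'items' entirely when nothing matches, hence the check.
        if 'items' in instances:
            for i in instances['items']:
                actions.start_instance(i['name'], zone)


    cluster_name = "{}-{}-{}-de-{}".format("dlab", "proj", "endp", "spark1")
    start_data_engine(GCPMeta(), GCPActions(), "us-central1-a", cluster_name)
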
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
index 2396600..e370bfb 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
@@ -24,9 +24,10 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 
@@ -34,12 +35,12 @@
 def stop_data_engine(zone, cluster_name):
     print("Stopping data engine cluster")
     try:
-        instances = GCPMeta().get_list_instances(zone, cluster_name)
+        instances = GCPMeta.get_list_instances(zone, cluster_name)
         if 'items' in instances:
             for i in instances['items']:
-                GCPActions().stop_instance(i['name'], zone)
+                GCPActions.stop_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop dataengine", str(err))
         sys.exit(1)
 
 
@@ -51,23 +52,27 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+    data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     try:
         logging.info('[STOPPING DATA ENGINE]')
         print('[STOPPING DATA ENGINE]')
@@ -75,7 +80,7 @@
             stop_data_engine(data_engine['zone'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to stop Data Engine.", str(err))
+            dlab.fab.append_result("Failed to stop Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -85,6 +90,6 @@
                    "Action": "Stop Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
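
dataengine_stop.py mirrors the start script; beyond the import changes, the substantive cleanup in the hunks above is reading optional environment variables with an explicit membership test instead of a bare `except:`. A sketch of the two patterns side by side (env var names as in the script):

    import os

    # Old pattern: a bare except hides real failures (typos, interrupts).
    try:
        exploratory_name = os.environ['exploratory_name'].lower().replace('_', '-')
    except:
        exploratory_name = ''

    # New pattern: only the intended "variable absent" case takes the fallback.
    if 'computational_name' in os.environ:
        computational_name = os.environ['computational_name'].replace('_', '-').lower()
    else:
        computational_name = ''

    print(exploratory_name, computational_name)
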
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
index f50ffb2..6d9adfd 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
@@ -24,9 +24,10 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 
@@ -34,19 +35,19 @@
 def terminate_data_engine(zone, notebook_name, os_user, key_path, cluster_name):
     print("Terminating data engine cluster")
     try:
-        instances = GCPMeta().get_list_instances(zone, cluster_name)
+        instances = GCPMeta.get_list_instances(zone, cluster_name)
         if 'items' in instances:
             for i in instances['items']:
-                GCPActions().remove_instance(i['name'], zone)
+                GCPActions.remove_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine", str(err))
         sys.exit(1)
 
     print("Removing Data Engine kernels from notebook")
     try:
-        remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name)
+        dlab.actions_lib.remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove dataengine kernels from notebook", str(err))
         sys.exit(1)
 
 
@@ -58,25 +59,29 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+    data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     data_engine['notebook_name'] = os.environ['notebook_instance_name']
-    data_engine['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+    data_engine['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])

     try:
@@ -87,7 +92,7 @@
                                   data_engine['key_path'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Data Engine.", str(err))
+            dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -98,6 +103,6 @@
                    "Action": "Terminate Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
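
Termination in dataengine_terminate.py runs in two ordered steps: delete every cluster instance, then scrub the Spark kernels the cluster registered on its notebook; each step records the error and exits non-zero on failure. A sketch under stub assumptions (_Meta and _Actions are hypothetical stand-ins for dlab.meta_lib.GCPMeta and dlab.actions_lib.GCPActions):

    import sys


    class _Meta(object):
        # Hypothetical stand-in for dlab.meta_lib.GCPMeta.
        def get_list_instances(self, zone, cluster_name):
            return {"items": [{"name": cluster_name + "-m"}]}


    class _Actions(object):
        # Hypothetical stand-in for dlab.actions_lib.GCPActions.
        def remove_instance(self, name, zone):
            print("removing {} in {}".format(name, zone))

        def remove_dataengine_kernels(self, notebook, user, key, cluster):
            print("removing {} kernels from {}".format(cluster, notebook))


    def terminate_data_engine(meta, actions, zone, notebook_name, os_user,
                              key_path, cluster_name):
        # Instances go first; kernels are only scrubbed once the cluster is gone.
        try:
            for i in meta.get_list_instances(zone, cluster_name).get('items', []):
                actions.remove_instance(i['name'], zone)
        except Exception as err:
            print("Failed to terminate dataengine: {}".format(err))
            sys.exit(1)
        try:
            actions.remove_dataengine_kernels(notebook_name, os_user, key_path,
                                              cluster_name)
        except Exception as err:
            print("Failed to remove dataengine kernels: {}".format(err))
            sys.exit(1)


    terminate_data_engine(_Meta(), _Actions(), "us-central1-a", "nb1", "dlab",
                          "/root/keys/key.pem", "dlab-proj-endp-de-spark1")
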
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
index c11639c..238f9b4 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -37,67 +39,76 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        if 'instance_name' in notebook_config and 'zone' in notebook_config:
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -105,9 +116,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -124,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -134,16 +143,16 @@
         logging.info('[INSTALLING PREREQUISITES TO DEEPLEARNING NOTEBOOK INSTANCE]')
         print('[INSTALLING PREREQUISITES TO DEEPLEARNING NOTEBOOK INSTANCE]')
         params = "--hostname {} --keyfile {} --user {} --region {}". \
-            format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'], os.environ['gcp_region'])
+            format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'],
+                   os.environ['gcp_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -165,27 +174,26 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Deep Learning node.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY]')
         logging.info('[INSTALLING USERs KEY]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": notebook_config['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -197,35 +205,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -250,59 +257,64 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    tensorboard_acces_url = "http://" + edge_instance_hostname + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['project_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
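
The hunk above settles on one error-handling shape for every provisioning step: try the step, and on failure report it via dlab.fab.append_result, tear down what was created, and exit non-zero. A minimal sketch of that fail-and-rollback pattern; run_step, do_step and rollback are hypothetical names (the real scripts inline this logic):

import sys

def run_step(description, do_step, rollback):
    # Run one provisioning step; on any failure, record the error,
    # undo the partially created resources, and stop with a non-zero
    # exit code so the engine marks the operation as failed.
    try:
        do_step()
    except Exception as err:
        print("{0} {1}".format(description, str(err)))  # stands in for dlab.fab.append_result
        rollback()  # stands in for GCPActions.remove_instance(...) and friends
        sys.exit(1)

# usage with dummy callables:
run_step("Failed to set edge reverse proxy template.",
         do_step=lambda: None,
         rollback=lambda: None)
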
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
index 2271d97..110efb9 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
@@ -22,10 +22,17 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import traceback
+import logging
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import uuid
+from fabric.api import *
+
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,80 +42,124 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    print('Generating infrastructure names and tags')
-    edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    edge_conf['key_name'] = os.environ['conf_key_name']
-    edge_conf['user_keyname'] = os.environ['project_name']
-    edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    edge_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    def clear_resources():
+        GCPActions.remove_instance(edge_conf['instance_name'], edge_conf['zone'])
+        GCPActions.remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
+        GCPActions.remove_bucket(edge_conf['bucket_name'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(edge_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(edge_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(edge_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
+        GCPActions.remove_role(edge_conf['ps_role_name'])
+        GCPActions.remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
+        GCPActions.remove_role(edge_conf['edge_role_name'])
+        GCPActions.remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        edge_conf = dict()
+        edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        edge_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        edge_conf['key_name'] = os.environ['conf_key_name']
+        edge_conf['user_keyname'] = edge_conf['project_name']
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                edge_conf['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            edge_conf['vpc_name'] = edge_conf['service_base_name'] + '-vpc'
+        edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        edge_conf['subnet_name'] = '{0}-{1}-{2}-subnet'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'],
+                                                               edge_conf['endpoint_name'])
+        edge_conf['region'] = os.environ['gcp_region']
+        edge_conf['zone'] = os.environ['gcp_zone']
+        edge_conf['vpc_selflink'] = GCPMeta.get_vpc(edge_conf['vpc_name'])['selfLink']
+        edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+        edge_conf['edge_service_account_name'] = '{}-{}-{}-edge-sa'.format(edge_conf['service_base_name'],
+                                                                           edge_conf['project_name'],
+                                                                           edge_conf['endpoint_name'])
+        edge_conf['edge_unique_index'] = GCPMeta.get_index_by_service_account_name(
+            edge_conf['edge_service_account_name'])
+        edge_conf['edge_role_name'] = '{}-{}-{}-edge-role'.format(edge_conf['service_base_name'],
+                                                                  edge_conf['project_name'],
+                                                                  edge_conf['edge_unique_index'])
+        edge_conf['ps_service_account_name'] = '{}-{}-{}-ps-sa'.format(edge_conf['service_base_name'],
+                                                                       edge_conf['project_name'],
+                                                                       edge_conf['endpoint_name'])
+        edge_conf['ps_unique_index'] = GCPMeta.get_index_by_service_account_name(edge_conf['ps_service_account_name'])
+        edge_conf['ps_role_name'] = '{}-{}-{}-ps-role'.format(edge_conf['service_base_name'],
+                                                              edge_conf['project_name'], edge_conf['ps_unique_index'])
+        edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'], edge_conf['endpoint_name'])
+        edge_conf['firewall_name'] = '{}-sg'.format(edge_conf['instance_name'])
+        edge_conf['notebook_firewall_name'] = '{0}-{1}-{2}-nb-sg'.format(edge_conf['service_base_name'],
+                                                                         edge_conf['project_name'],
+                                                                         edge_conf['endpoint_name'])
+        edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'],
+                                                               edge_conf['endpoint_name'])
+        edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                         edge_conf['endpoint_name'])
+        edge_conf['instance_size'] = os.environ['gcp_edge_instance_size']
+        edge_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(edge_conf['service_base_name'],
+                                                                          edge_conf['project_name'],
+                                                                          edge_conf['endpoint_name'])
+        edge_conf['instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(edge_conf['instance_name'])
+        edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        edge_conf['private_subnet_cidr'] = GCPMeta.get_subnet(edge_conf['subnet_name'],
+                                                                edge_conf['region'])['ipCidrRange']
+        edge_conf['static_ip'] = \
+            GCPMeta.get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
+        edge_conf['private_ip'] = GCPMeta.get_private_ip_address(edge_conf['instance_name'])
+        edge_conf['vpc_cidrs'] = [edge_conf['vpc_cidr']]
+        edge_conf['fw_common_name'] = '{}-{}-{}-ps-sg'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                                              edge_conf['endpoint_name'])
+        edge_conf['fw_ps_ingress'] = '{}-ingress'.format(edge_conf['fw_common_name'])
+        edge_conf['fw_ps_egress_private'] = '{}-egress-private'.format(edge_conf['fw_common_name'])
+        edge_conf['fw_ps_egress_public'] = '{}-egress-public'.format(edge_conf['fw_common_name'])
+        edge_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(edge_conf['instance_name'])
+        edge_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(edge_conf['instance_name'])
+        edge_conf['fw_edge_egress_public'] = '{}-egress-public'.format(edge_conf['instance_name'])
+        edge_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(edge_conf['instance_name'])
+
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            edge_conf['step_cert_sans'] = ' --san {0} --san {1} --san {2}'.format(edge_conf['static_ip'],
+                                                                                  edge_conf['instance_hostname'],
+                                                                                  edge_conf['private_ip'])
         else:
-            edge_conf['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        edge_conf['vpc_name'] = edge_conf['service_base_name'] + '-ssn-vpc'
-    edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-    edge_conf['subnet_name'] = '{0}-{1}-subnet'.format(edge_conf['service_base_name'], edge_conf['project_name'])
-    edge_conf['region'] = os.environ['gcp_region']
-    edge_conf['zone'] = os.environ['gcp_zone']
-    edge_conf['vpc_selflink'] = GCPMeta().get_vpc(edge_conf['vpc_name'])['selfLink']
-    edge_conf['private_subnet_prefix'] = os.environ['gcp_private_subnet_prefix']
-    edge_conf['edge_service_account_name'] = '{}-{}-edge'.format(edge_conf['service_base_name'],
-                                                                 edge_conf['project_name'])
-    edge_conf['edge_role_name'] = '{}-{}-edge'.format(edge_conf['service_base_name'],
-                                                      edge_conf['project_name'])
-    edge_conf['ps_service_account_name'] = '{}-{}-ps'.format(edge_conf['service_base_name'],
-                                                             edge_conf['project_name'])
-    edge_conf['ps_role_name'] = '{}-{}-ps'.format(edge_conf['service_base_name'],
-                                                  edge_conf['project_name'])
-    edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
-                                                           edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['firewall_name'] = edge_conf['instance_name'] + '{}-firewall'.format(edge_conf['instance_name'])
-    edge_conf['notebook_firewall_name'] = '{0}-{1}-nb-firewall'.format(edge_conf['service_base_name'],
-                                                                       edge_conf['project_name'])
-    edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
-                                                           edge_conf['project_name'],
-                                                           edge_conf['endpoint_name'])
-    edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
-                                                                     edge_conf['endpoint_name'])
-    edge_conf['instance_size'] = os.environ['gcp_edge_instance_size']
-    edge_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_conf['static_address_name'] = '{0}-{1}-ip'.format(edge_conf['service_base_name'], edge_conf['project_name'])
-    instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_conf['instance_name'])
-    edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-    edge_conf['private_subnet_cidr'] = GCPMeta().get_subnet(edge_conf['subnet_name'],
-                                                            edge_conf['region'])['ipCidrRange']
-    edge_conf['static_ip'] = \
-        GCPMeta().get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
-    edge_conf['private_ip'] = GCPMeta().get_private_ip_address(edge_conf['instance_name'])
-    edge_conf['vpc_cidrs'] = [edge_conf['vpc_cidr']]
-    edge_conf['fw_common_name'] = '{}-{}-ps'.format(edge_conf['service_base_name'], edge_conf['project_name'])
-    edge_conf['fw_ps_ingress'] = '{}-ingress'.format(edge_conf['fw_common_name'])
-    edge_conf['fw_ps_egress_private'] = '{}-egress-private'.format(edge_conf['fw_common_name'])
-    edge_conf['fw_ps_egress_public'] = '{}-egress-public'.format(edge_conf['fw_common_name'])
-    edge_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(edge_conf['instance_name'])
-    edge_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(edge_conf['instance_name'])
-    edge_conf['fw_edge_egress_public'] = '{}-egress-public'.format(edge_conf['instance_name'])
-    edge_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(edge_conf['instance_name'])
-    edge_conf['allowed_ip_cidr'] = list()
-    for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-        edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+            edge_conf['step_cert_sans'] = ''
+
+        edge_conf['allowed_ip_cidr'] = list()
+        for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+            edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            edge_conf['initial_user'] = 'ubuntu'
+            edge_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            edge_conf['initial_user'] = 'ec2-user'
+            edge_conf['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, "/root/keys/" + os.environ['conf_key_name'] + ".pem", initial_user,
-             edge_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+                 edge_conf['instance_hostname'], "/root/keys/" + os.environ['conf_key_name'] + ".pem",
+                 edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -116,53 +167,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING PREREQUISITES]')
         logging.info('[INSTALLING PREREQUISITES]')
-        params = "--hostname {} --keyfile {} --user {} --region {}".\
-            format(instance_hostname, edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'], os.environ['gcp_region'])
+        params = "--hostname {} --keyfile {} --user {} --region {}".format(
+                  edge_conf['instance_hostname'], edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'],
+                  os.environ['gcp_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -170,7 +192,7 @@
         logging.info('[INSTALLING HTTP PROXY]')
         additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
                              "template_file": "/root/templates/squid.conf",
-                             "project_name": os.environ['project_name'],
+                             "project_name": edge_conf['project_name'],
                              "ldap_host": os.environ['ldap_hostname'],
                              "ldap_dn": os.environ['ldap_dn'],
                              "ldap_user": os.environ['ldap_service_username'],
@@ -178,30 +200,16 @@
                              "vpc_cidrs": edge_conf['vpc_cidrs'],
                              "allowed_ip_cidr": edge_conf['allowed_ip_cidr']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
-                 .format(instance_hostname, edge_conf['ssh_key_path'], json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+                 .format(edge_conf['instance_hostname'], edge_conf['ssh_key_path'], json.dumps(additional_config),
+                         edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('configure_http_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing http proxy.", str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing http proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
 
@@ -211,67 +219,58 @@
         additional_config = {"user_keyname": edge_conf['user_keyname'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, edge_conf['ssh_key_path'], json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+            edge_conf['instance_hostname'], edge_conf['ssh_key_path'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key. Excpeption: " + str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing users key. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING NGINX REVERSE PROXY]')
         logging.info('[INSTALLING NGINX REVERSE PROXY]')
-        params = "--hostname {} --keyfile {} --user {}" \
-            .format(instance_hostname, edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'])
+        edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
+        params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
+                 "--step_cert_sans '{}'".format(edge_conf['instance_hostname'], edge_conf['ssh_key_path'],
+                                                edge_conf['dlab_ssh_user'], '{}-{}-{}'.format(
+                                                                             edge_conf['service_base_name'],
+                                                                             edge_conf['project_name'],
+                                                                             edge_conf['endpoint_name']),
+                                                edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
+
         try:
             local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
+        keycloak_params = "--service_base_name {} --keycloak_auth_server_url {} --keycloak_realm_name {} " \
+                          "--keycloak_user {} --keycloak_user_password {} --keycloak_client_secret {} " \
+                          "--edge_public_ip {} --project_name {} --endpoint_name {} " \
+            .format(edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
+                    os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
+                    os.environ['keycloak_user_password'],
+                    edge_conf['keycloak_client_secret'], edge_conf['instance_hostname'], edge_conf['project_name'],
+                    edge_conf['endpoint_name'])
+        try:
+            local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
+        except:
+            traceback.print_exc()
+            raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing nginx reverse proxy. Excpeption: " + str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing nginx reverse proxy. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
-        print("Hostname: {}".format(instance_hostname))
+        print("Hostname: {}".format(edge_conf['instance_hostname']))
         print("Public IP: {}".format(edge_conf['static_ip']))
         print("Private IP: {}".format(edge_conf['private_ip']))
         print("Key name: {}".format(edge_conf['key_name']))
@@ -279,7 +278,7 @@
         print("Shared bucket name: {}".format(edge_conf['shared_bucket_name']))
         print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr']))
         with open("/root/result.json", 'w') as result:
-            res = {"hostname": instance_hostname,
+            res = {"hostname": edge_conf['instance_hostname'],
                    "public_ip": edge_conf['static_ip'],
                    "ip": edge_conf['private_ip'],
                    "instance_id": edge_conf['instance_name'],
@@ -290,11 +289,12 @@
                    "socks_port": "1080",
                    "notebook_subnet": edge_conf['private_subnet_cidr'],
                    "full_edge_conf": edge_conf,
-                   "project_name": os.environ['project_name'],
+                   "project_name": edge_conf['project_name'],
                    "@class": "com.epam.dlab.dto.gcp.edge.EdgeInfoGcp",
                    "Action": "Create new EDGE server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
+        sys.exit(1)
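
Two themes run through the edge_configure.py changes: the sixteen-line teardown block that used to be copied into every except branch is consolidated into the single clear_resources() helper, and every per-project resource name gains the endpoint segment, i.e. {service_base_name}-{project_name}-{endpoint_name}-<suffix>. A small sketch of that naming convention; resource_name is a hypothetical helper (the scripts inline the format calls):

def resource_name(service_base_name, project_name, endpoint_name, suffix):
    # GCP resource names disallow underscores and upper-case letters,
    # so the user-supplied parts are normalized the same way the scripts do.
    project = project_name.replace('_', '-').lower()
    endpoint = endpoint_name.replace('_', '-').lower()
    return '{0}-{1}-{2}-{3}'.format(service_base_name, project, endpoint, suffix)

# resource_name('dlab', 'My_Project', 'Prod', 'edge') -> 'dlab-my-project-prod-edge'
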
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
index 06085dd..52da1be 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import os
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,31 +39,34 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    edge_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+    edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    edge_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
     edge_conf['region'] = os.environ['gcp_region']
     edge_conf['zone'] = os.environ['gcp_zone']
-    edge_conf['static_address_name'] = '{0}-{1}-ip'.format(edge_conf['service_base_name'], edge_conf['project_name'])
+    edge_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(edge_conf['service_base_name'],
+                                                                      edge_conf['project_name'],
+                                                                      edge_conf['endpoint_name'])
 
     logging.info('[START EDGE]')
     print('[START EDGE]')
     try:
-        GCPActions().start_instance(edge_conf['instance_name'], edge_conf['zone'])
+        GCPActions.start_instance(edge_conf['instance_name'], edge_conf['zone'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start edge.", str(err))
+        dlab.fab.append_result("Failed to start edge.", str(err))
         sys.exit(1)
 
     try:
-        instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_conf['instance_name'])
+        instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_conf['instance_name'])
         public_ip_address = \
-            GCPMeta().get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
-        ip_address = GCPMeta().get_private_ip_address(edge_conf['instance_name'])
+            GCPMeta.get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
+        ip_address = GCPMeta.get_private_ip_address(edge_conf['instance_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
@@ -74,7 +81,7 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
         sys.exit(1)
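
Like every script in this family, edge_start.py reports back to the provisioning engine by writing a JSON document to /root/result.json; the engine reads it after the process exits. A minimal sketch of that contract, with write_result as a hypothetical helper and field names as they appear in these scripts:

import json

def write_result(res, path):
    # The engine parses this file once the script terminates, so it is
    # written on the success path and, where possible, before sys.exit(1).
    with open(path, 'w') as result:
        result.write(json.dumps(res))

# the real scripts always use /root/result.json; a local path is used
# here only so the sketch runs anywhere
write_result({"hostname": "edge.example.internal",
              "public_ip": "203.0.113.10",
              "ip": "10.0.1.5",
              "Action": "Start up notebook server"},
             "result.json")
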
 
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
index 5342d8a..ee15222 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import logging
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,21 +39,22 @@
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+    edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
     edge_conf['zone'] = os.environ['gcp_zone']
-    edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+    edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    edge_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
 
     logging.info('[STOP EDGE]')
     print('[STOP EDGE]')
     try:
-        GCPActions().stop_instance(edge_conf['instance_name'], edge_conf['zone'])
+        GCPActions.stop_instance(edge_conf['instance_name'], edge_conf['zone'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop edge.", str(err))
+        dlab.fab.append_result("Failed to stop edge.", str(err))
         sys.exit(1)
 
     try:
@@ -58,7 +63,6 @@
                    "Action": "Stop edge server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
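
A smaller change repeated across edge_start.py and edge_stop.py: instead of constructing a fresh GCPActions()/GCPMeta() wrapper for each call, each script now builds one instance up front and reuses it, so the underlying API client is created once. A sketch of the before/after shape; DummyActions is a stand-in class, not part of the codebase:

class DummyActions:
    def __init__(self):
        print("building API client")  # expensive setup runs here
    def stop_instance(self, name, zone):
        print("stopping {0} in {1}".format(name, zone))

# before: a new wrapper (and client) per call
DummyActions().stop_instance("dlab-prj-ep-edge", "us-west1-a")

# after: one shared instance, as in GCPActions = dlab.actions_lib.GCPActions()
actions = DummyActions()
actions.stop_instance("dlab-prj-ep-edge", "us-west1-a")
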
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
index 155a9c3..8080988 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
@@ -22,118 +22,127 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import traceback
+import logging
 
 
-def terminate_edge_node(user_name, service_base_name, region, zone):
+def terminate_edge_node(user_name, service_base_name, region, zone, project_name, endpoint_name):
     print("Terminating Dataengine-service clusters")
     try:
         labels = [
             {'sbn': service_base_name},
             {'user': user_name}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataproc", str(err))
         sys.exit(1)
 
     print("Terminating EDGE and notebook instances")
-    base = '{}-{}'.format(service_base_name, user_name)
-    keys = ['edge', 'ps', 'ip', 'bucket', 'subnet']
+    base = '{}-{}-{}'.format(service_base_name, project_name, endpoint_name)
+    keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
     targets = ['{}-{}'.format(base, k) for k in keys]
     try:
-        instances = GCPMeta().get_list_instances(zone, base)
+        instances = GCPMeta.get_list_instances(zone, base)
         if 'items' in instances:
             for i in instances['items']:
                 if 'user' in i['labels'] and user_name == i['labels']['user']:
-                    GCPActions().remove_instance(i['name'], zone)
+                    GCPActions.remove_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instances", str(err))
         sys.exit(1)
 
     print("Removing static addresses")
     try:
-        static_addresses = GCPMeta().get_list_static_addresses(region, base)
+        static_addresses = GCPMeta.get_list_static_addresses(region, base)
         if 'items' in static_addresses:
             for i in static_addresses['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_static_address(i['name'], region)
+                    GCPActions.remove_static_address(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IPs", str(err))
         sys.exit(1)
 
     print("Removing storage bucket")
     try:
-        buckets = GCPMeta().get_list_buckets(base)
+        buckets = GCPMeta.get_list_buckets(base)
         if 'items' in buckets:
             for i in buckets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_bucket(i['name'])
+                    GCPActions.remove_bucket(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove buckets", str(err))
         sys.exit(1)
 
     print("Removing firewalls")
     try:
-        firewalls = GCPMeta().get_list_firewalls(base)
+        firewalls = GCPMeta.get_list_firewalls(base)
         if 'items' in firewalls:
             for i in firewalls['items']:
                 if bool(set(targets) & set(i['targetTags'])):
-                    GCPActions().remove_firewall(i['name'])
+                    GCPActions.remove_firewall(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing Service accounts and roles")
     try:
-        list_service_accounts = GCPMeta().get_list_service_accounts()
+        list_service_accounts = GCPMeta.get_list_service_accounts()
         for service_account in (set(targets) & set(list_service_accounts)):
             if service_account.startswith(service_base_name):
-                GCPActions().remove_service_account(service_account)
-        list_roles_names = GCPMeta().get_list_roles()
+                GCPActions.remove_service_account(service_account, service_base_name)
+        list_roles_names = GCPMeta.get_list_roles()
         for role in (set(targets) & set(list_roles_names)):
             if role.startswith(service_base_name):
-                GCPActions().remove_role(role)
+                GCPActions.remove_role(role)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove service accounts and roles", str(err))
         sys.exit(1)
 
     print("Removing subnets")
     try:
-        list_subnets = GCPMeta().get_list_subnetworks(region, '', base)
+        list_subnets = GCPMeta.get_list_subnetworks(region, '', base)
         if 'items' in list_subnets:
             vpc_selflink = list_subnets['items'][0]['network']
             vpc_name = vpc_selflink.split('/')[-1]
-            subnets = GCPMeta().get_list_subnetworks(region, vpc_name, base)
+            subnets = GCPMeta.get_list_subnetworks(region, vpc_name, base)
             for i in subnets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_subnet(i['name'], region)
+                    GCPActions.remove_subnet(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnets", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    edge_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
+    edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+    edge_conf['edge_user_name'] = (os.environ['edge_user_name'])
+    edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    edge_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
     edge_conf['region'] = os.environ['gcp_region']
     edge_conf['zone'] = os.environ['gcp_zone']
 
@@ -142,12 +151,13 @@
         print('[TERMINATE EDGE]')
         try:
             terminate_edge_node(edge_conf['edge_user_name'], edge_conf['service_base_name'],
-                                edge_conf['region'], edge_conf['zone'])
+                                edge_conf['region'], edge_conf['zone'], edge_conf['project_name'],
+                                edge_conf['endpoint_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
-    except Exception as err:
-        print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to terminate edge.", str(err))
+            raise Exception
+    except:
         sys.exit(1)
 
     try:
@@ -157,6 +167,6 @@
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
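
terminate_edge_node() selects what to delete by name rather than by label alone: it composes the expected {base}-<suffix> names for the project and intersects them with whatever the GCP listing returns. A self-contained sketch of that selection logic; the listed names are fabricated stand-ins for what GCPMeta would return:

base = 'dlab-myproject-myendpoint'  # service_base_name-project_name-endpoint_name
keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
targets = ['{0}-{1}'.format(base, k) for k in keys]

listed = ['dlab-myproject-myendpoint-bucket',    # would come from GCPMeta.get_list_buckets(base)
          'dlab-otherproject-myendpoint-bucket']

for name in listed:
    if bool(set(targets) & set([name])):         # same membership test the script uses
        print('removing {0}'.format(name))       # the script calls GCPActions.remove_bucket(name)
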
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
index f49724c..863be19 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -37,63 +39,76 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-                                                        notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-                                                        notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -101,9 +116,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -120,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -138,9 +151,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring jupiter and all dependencies
@@ -164,9 +176,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -175,16 +186,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -196,35 +207,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -249,51 +259,55 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
-    print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_access_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_dataengine-service_create_configs.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_dataengine-service_create_configs.py
index 959f9b5..9bc8e37 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_dataengine-service_create_configs.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_dataengine-service_create_configs.py
@@ -50,6 +50,7 @@
 parser.add_argument('--application', type=str, default='')
 parser.add_argument('--r_version', type=str, default='')
 parser.add_argument('--r_enabled', type=str, default='')
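+# --scala_version is now supplied by the caller (the kernel install script reads it from the
+# cluster bucket) instead of being detected here by shelling out to scala.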
+parser.add_argument('--scala_version', type=str, default='')
 args = parser.parse_args()
 
 dataproc_dir = '/opt/{}/jars/'.format(args.dataproc_version)
@@ -79,7 +80,6 @@
 
 def toree_kernel(args):
     spark_path = '/opt/{0}/{1}/spark/'.format(args.dataproc_version, args.cluster_name)
-    scala_version = local('scala -e "println(scala.util.Properties.versionNumberString)"', capture=True)
     local('mkdir -p {0}toree_{1}/'.format(kernels_dir, args.cluster_name))
     local('tar zxvf /tmp/toree_kernel.tar.gz -C {0}toree_{1}/'.format(kernels_dir, args.cluster_name))
     local('sudo mv {0}toree_{1}/toree-0.2.0-incubating/* {0}toree_{1}/'.format(kernels_dir, args.cluster_name))
@@ -93,7 +93,7 @@
     text = text.replace('SPARK_PATH', spark_path)
     text = text.replace('OS_USER', args.os_user)
     text = text.replace('DATAENGINE-SERVICE_VERSION', args.dataproc_version)
-    text = text.replace('SCALA_VERSION', scala_version)
+    text = text.replace('SCALA_VERSION', args.scala_version)
     with open(kernel_path, 'w') as f:
         f.write(text)
     local('touch /tmp/kernel_var.json')
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_install_dataengine-service_kernels.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_install_dataengine-service_kernels.py
index 710974c..cb17668 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_install_dataengine-service_kernels.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_install_dataengine-service_kernels.py
@@ -70,6 +70,10 @@
 
 
 if __name__ == "__main__":
+    GCPActions().get_from_bucket(args.bucket, '{0}/{1}/scala_version'.format(args.project_name, args.cluster_name),
+                                 '/tmp/scala_version')
+    with open('/tmp/scala_version') as f:
+        scala_version = str(f.read()).replace(',', '')
     env.hosts = "{}".format(args.notebook_ip)
     env.user = args.os_user
     env.key_filename = "{}".format(args.keyfile)
@@ -84,7 +88,7 @@
     r_enabled = os.environ['notebook_r_enabled']
     sudo('echo "[global]" > /etc/pip.conf; echo "proxy = $(cat /etc/profile | grep proxy | head -n1 | cut -f2 -d=)" >> /etc/pip.conf')
     sudo('echo "use_proxy=yes" > ~/.wgetrc; proxy=$(cat /etc/profile | grep proxy | head -n1 | cut -f2 -d=); echo "http_proxy=$proxy" >> ~/.wgetrc; echo "https_proxy=$proxy" >> ~/.wgetrc')
-    sudo('unset http_proxy https_proxy; export gcp_project_id="{0}"; export conf_resource="{1}"; /usr/bin/python /usr/local/bin/create_configs.py --bucket {2} --cluster_name {3} --dataproc_version {4} --spark_version {5} --hadoop_version {6} --region {7} --user_name {8} --os_user {9} --pip_mirror {10} --application {11} --r_version {12} --r_enabled {13}'
+    sudo('unset http_proxy https_proxy; export gcp_project_id="{0}"; export conf_resource="{1}"; /usr/bin/python /usr/local/bin/create_configs.py --bucket {2} --cluster_name {3} --dataproc_version {4} --spark_version {5} --hadoop_version {6} --region {7} --user_name {8} --os_user {9} --pip_mirror {10} --application {11} --r_version {12} --r_enabled {13} --scala_version {14}'
          .format(os.environ['gcp_project_id'], os.environ['conf_resource'], args.bucket, args.cluster_name,
                  args.dataproc_version, spark_version, hadoop_version, args.region, args.project_name, args.os_user,
-                 args.pip_mirror, args.application, r_version, r_enabled))
+                 args.pip_mirror, args.application, r_version, r_enabled, scala_version))
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
new file mode 100644
index 0000000..ee95260
--- /dev/null
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+import json
+import sys
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+from fabric.api import *
+
+
+if __name__ == "__main__":
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
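+        # Image names are scoped per project when sharing is disabled; shared images are scoped
+        # per endpoint only and carry no project_tag label.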
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        if os.environ['conf_os_family'] == 'debian':
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
+
+        logging.info('[CREATING DLAB SSH USER]')
+        print('[CREATING DLAB SSH USER]')
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
+
+        try:
+            local("~/scripts/{}.py {}".format('create_ssh_user', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    # configuring proxy on Notebook instance
+    try:
+        logging.info('[CONFIGURE PROXY ON JUPYTERLAB INSTANCE]')
+        print('[CONFIGURE PROXY ON JUPYTERLAB INSTANCE]')
+        additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
+        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
+            .format(instance_hostname, notebook_config['instance_name'], notebook_config['ssh_key_path'],
+                    json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_proxy', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    # updating repositories & installing python packages
+    try:
+        logging.info('[INSTALLING PREREQUISITES TO JUPYTERLAB NOTEBOOK INSTANCE]')
+        print('[INSTALLING PREREQUISITES TO JUPYTERLAB NOTEBOOK INSTANCE]')
+        params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".\
+            format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'],
+                   os.environ['gcp_region'], edge_instance_private_ip)
+        try:
+            local("~/scripts/{}.py {}".format('install_prerequisites', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    # installing and configuring jupyter and all dependencies
+    try:
+        logging.info('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
+        print('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
+        params = "--hostname {} --keyfile {} --edge_ip {} " \
+                 "--region {} --spark_version {} " \
+                 "--hadoop_version {} --os_user {} " \
+                 "--scala_version {} --r_mirror {} " \
+                 "--exploratory_name {}".\
+            format(instance_hostname, notebook_config['ssh_key_path'], edge_instance_private_ip,
+                   os.environ['gcp_region'], os.environ['notebook_spark_version'],
+                   os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
+                   os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'],
+                   notebook_config['exploratory_name'],)
+        try:
+            local("~/scripts/{}.py {}".format('configure_jupyterlab_node', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        print("[INSTALLING USER'S KEY]")
+        logging.info("[INSTALLING USER'S KEY]")
+        additional_config = {"user_keyname": os.environ['project_name'],
+                             "user_keydir": os.environ['conf_key_dir']}
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('install_user_key', params))
+        except:
+            dlab.fab.append_result("Failed installing users key")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        print('[SETUP USER GIT CREDENTIALS]')
+        logging.info('[SETUP USER GIT CREDENTIALS]')
+        params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
+            .format(notebook_config['dlab_ssh_user'], instance_hostname, notebook_config['ssh_key_path'])
+        try:
+            local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
+            local("~/scripts/{}.py {}".format('manage_git_creds', params))
+        except:
+            dlab.fab.append_result("Failed setup git credentials")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
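+    # The first successfully configured instance is captured as primary/secondary disk images,
+    # so later notebooks of this template can start from pre-provisioned disks.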
+    if notebook_config['shared_image_enabled'] == 'true':
+        try:
+            print('[CREATING IMAGE]')
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
+            if primary_image_id == '':
+                print("Looks like it's first time we configure notebook server. Creating images.")
+                image_id_list = GCPActions.create_image_from_instance_disks(
+                    notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
+                    notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
+                if image_id_list and image_id_list[0] != '':
+                    print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
+                else:
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
+                if image_id_list and image_id_list[1] != '':
+                    print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
+        except Exception as err:
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
+            sys.exit(1)
+
+    try:
+        print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        additional_info = {
+            'instance_hostname': instance_hostname,
+            'tensor': False
+        }
+        params = "--edge_hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} " \
+                 "--type {} " \
+                 "--exploratory_name {} " \
+                 "--additional_info '{}'"\
+            .format(edge_instance_private_ip,
+                    notebook_config['ssh_key_path'],
+                    notebook_config['dlab_ssh_user'],
+                    'jupyter',
+                    notebook_config['exploratory_name'],
+                    json.dumps(additional_info))
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
+        except:
+            dlab.fab.append_result("Failed edge reverse proxy template")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
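+    # JupyterLab is delivered as a Docker container, so the Docker daemon itself needs the
+    # EDGE proxy settings before it can pull images.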
+    try:
+        print('[CONFIGURING PROXY FOR DOCKER]')
+        logging.info('[CONFIGURING PROXY FOR DOCKER]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   notebook_config['ssh_key_path'],
+                   notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/configure_proxy_for_docker.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
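+    # The JupyterLab UI runs in a container started by the helper script below, not as a host service.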
+    try:
+        print('[STARTING JUPYTER CONTAINER]')
+        logging.info('[STARTING JUPYTER CONTAINER]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   notebook_config['ssh_key_path'],
+                   notebook_config['dlab_ssh_user'])
+        try:
+           local("~/scripts/jupyterlab_container_start.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to start Jupyter container.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "http://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("JupyterLab URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "JupyterLab",
+                        "url": jupyter_notebook_acces_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_acces_url},
+                       #{"description": "JupyterLab (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
index e8ee8c0..f9822a0 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
@@ -22,11 +22,16 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
 import traceback
+import logging
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import uuid
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -36,106 +41,132 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    print('Generating infrastructure names and tags')
-    project_conf = dict()
-    project_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    project_conf['key_name'] = os.environ['conf_key_name']
-    project_conf['user_keyname'] = os.environ['project_name']
-    project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    project_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
-        else:
-            project_conf['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        project_conf['vpc_name'] = project_conf['service_base_name'] + '-ssn-vpc'
-    project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-    project_conf['private_subnet_name'] = '{0}-{1}-subnet'.format(project_conf['service_base_name'],
-                                                               project_conf['project_name'])
-    project_conf['subnet_name'] = os.environ['gcp_subnet_name']
-    project_conf['region'] = os.environ['gcp_region']
-    project_conf['zone'] = os.environ['gcp_zone']
-    project_conf['vpc_selflink'] = GCPMeta().get_vpc(project_conf['vpc_name'])['selfLink']
-    project_conf['private_subnet_prefix'] = os.environ['gcp_private_subnet_prefix']
-    project_conf['edge_service_account_name'] = '{}-{}-edge'.format(project_conf['service_base_name'],
-                                                                 project_conf['project_name'])
-    project_conf['edge_role_name'] = '{}-{}-edge'.format(project_conf['service_base_name'],
-                                                      project_conf['project_name'])
-    project_conf['ps_service_account_name'] = '{}-{}-ps'.format(project_conf['service_base_name'],
-                                                             project_conf['project_name'])
-    project_conf['ps_role_name'] = '{}-{}-ps'.format(project_conf['service_base_name'],
-                                                  project_conf['project_name'])
-    project_conf['ps_policy_path'] = '/root/files/ps_policy.json'
-    project_conf['ps_roles_path'] = '/root/files/ps_roles.json'
-    project_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(project_conf['service_base_name'],
-                                                          project_conf['project_name'], project_conf['endpoint_tag'])
-    project_conf['ssn_instance_name'] = '{}-ssn'.format(project_conf['service_base_name'])
-    project_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        project_conf = dict()
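+        # Short random suffixes keep IAM role names unique across re-creations: GCP does not
+        # allow a deleted custom role ID to be reused right away.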
+        project_conf['edge_unique_index'] = str(uuid.uuid4())[:5]
+        project_conf['ps_unique_index'] = str(uuid.uuid4())[:5]
+        project_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        project_conf['key_name'] = os.environ['conf_key_name']
+        project_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        project_conf['user_keyname'] = project_conf['project_name']
+        project_conf['project_tag'] = (project_conf['project_name'])
+        project_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        project_conf['endpoint_tag'] = project_conf['endpoint_name']
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                project_conf['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            project_conf['vpc_name'] = project_conf['service_base_name'] + '-vpc'
+        project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        project_conf['private_subnet_name'] = '{0}-{1}-{2}-subnet'.format(project_conf['service_base_name'],
+                                                                          project_conf['project_name'],
+                                                                          project_conf['endpoint_name'])
+        project_conf['subnet_name'] = os.environ['gcp_subnet_name']
+        project_conf['region'] = os.environ['gcp_region']
+        project_conf['zone'] = os.environ['gcp_zone']
+        project_conf['vpc_selflink'] = GCPMeta.get_vpc(project_conf['vpc_name'])['selfLink']
+        project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+        project_conf['edge_service_account_name'] = '{}-{}-{}-edge-sa'.format(project_conf['service_base_name'],
+                                                                              project_conf['project_name'],
+                                                                              project_conf['endpoint_name'])
+        project_conf['edge_role_name'] = '{}-{}-{}-{}-edge-role'.format(project_conf['service_base_name'],
+                                                                        project_conf['project_name'],
+                                                                        project_conf['endpoint_name'],
+                                                                        project_conf['edge_unique_index'])
+        project_conf['ps_service_account_name'] = '{}-{}-{}-ps-sa'.format(project_conf['service_base_name'],
+                                                                          project_conf['project_name'],
+                                                                          project_conf['endpoint_name'])
+        project_conf['ps_role_name'] = '{}-{}-{}-{}-ps-role'.format(project_conf['service_base_name'],
+                                                                    project_conf['project_name'],
+                                                                    project_conf['endpoint_name'],
+                                                                    project_conf['ps_unique_index'])
+        project_conf['ps_policy_path'] = '/root/files/ps_policy.json'
+        project_conf['ps_roles_path'] = '/root/files/ps_roles.json'
+        project_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(project_conf['service_base_name'],
+                                                                  project_conf['project_name'],
+                                                                  project_conf['endpoint_name'])
+        project_conf['ssn_instance_name'] = '{}-ssn'.format(project_conf['service_base_name'])
+        project_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+                                                                  project_conf['project_name'],
+                                                                  project_conf['endpoint_name'])
+        project_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(project_conf['service_base_name'],
+                                                                            project_conf['endpoint_name'])
+        project_conf['instance_size'] = os.environ['gcp_edge_instance_size']
+        project_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        project_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        project_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(project_conf['service_base_name'],
+                                                                             project_conf['project_name'],
+                                                                             project_conf['endpoint_name'])
+        project_conf['fw_edge_ingress_public'] = '{}-sg-ingress-public'.format(project_conf['instance_name'])
+        project_conf['fw_edge_ingress_internal'] = '{}-sg-ingress-internal'.format(project_conf['instance_name'])
+        project_conf['fw_edge_egress_public'] = '{}-sg-egress-public'.format(project_conf['instance_name'])
+        project_conf['fw_edge_egress_internal'] = '{}-sg-egress-internal'.format(project_conf['instance_name'])
+        project_conf['ps_firewall_target'] = '{0}-{1}-{2}-ps'.format(project_conf['service_base_name'],
+                                                                     project_conf['project_name'],
+                                                                     project_conf['endpoint_name'])
+        project_conf['fw_common_name'] = '{}-{}-{}-ps'.format(project_conf['service_base_name'],
                                                               project_conf['project_name'],
                                                               project_conf['endpoint_name'])
-    project_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(project_conf['service_base_name'],
-                                                                        project_conf['endpoint_name'])
-    project_conf['instance_size'] = os.environ['gcp_edge_instance_size']
-    project_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    project_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    project_conf['static_address_name'] = '{0}-{1}-ip'.format(project_conf['service_base_name'], project_conf['project_name'])
-    project_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(project_conf['instance_name'])
-    project_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(project_conf['instance_name'])
-    project_conf['fw_edge_egress_public'] = '{}-egress-public'.format(project_conf['instance_name'])
-    project_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(project_conf['instance_name'])
-    project_conf['ps_firewall_target'] = '{0}-{1}-ps'.format(project_conf['service_base_name'],
-                                                          project_conf['project_name'])
-    project_conf['fw_common_name'] = '{}-{}-ps'.format(project_conf['service_base_name'], project_conf['project_name'])
-    project_conf['fw_ps_ingress'] = '{}-ingress'.format(project_conf['fw_common_name'])
-    project_conf['fw_ps_egress_private'] = '{}-egress-private'.format(project_conf['fw_common_name'])
-    project_conf['fw_ps_egress_public'] = '{}-egress-public'.format(project_conf['fw_common_name'])
-    project_conf['network_tag'] = project_conf['instance_name']
-    project_conf['instance_labels'] = {"name": project_conf['instance_name'],
-                                    "sbn": project_conf['service_base_name'],
-                                    "project_tag": project_conf['project_tag'],
-                                    "endpoint_tag": project_conf['endpoint_tag'],
-                                    "product": "dlab"}
-    project_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+        project_conf['fw_ps_ingress'] = '{}-sg-ingress'.format(project_conf['fw_common_name'])
+        project_conf['fw_ps_egress_private'] = '{}-sg-egress-private'.format(project_conf['fw_common_name'])
+        project_conf['fw_ps_egress_public'] = '{}-sg-egress-public'.format(project_conf['fw_common_name'])
+        project_conf['network_tag'] = project_conf['instance_name']
+        project_conf['instance_labels'] = {"name": project_conf['instance_name'],
+                                           "sbn": project_conf['service_base_name'],
+                                           "project_tag": project_conf['project_tag'],
+                                           "endpoint_tag": project_conf['endpoint_tag'],
+                                           "product": "dlab"}
+        project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
+        project_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
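+        # Optional operator-supplied range from which per-project private subnets are carved;
+        # passed through to common_create_subnet.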
+        if 'conf_user_subnets_range' in os.environ:
+            project_conf['user_subnets_range'] = os.environ['conf_user_subnets_range']
+        else:
+            project_conf['user_subnets_range'] = ''
 
-    # FUSE in case of absence of user's key
-    try:
-        project_conf['user_key'] = os.environ['key']
+        # FUSE in case of absence of user's key
         try:
-            local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'], os.environ['project_name']))
-        except:
-            print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
-    except KeyError:
-        print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
-        sys.exit(1)
+            project_conf['user_key'] = os.environ['key']
+            try:
+                local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
+                                                        project_conf['project_name']))
+            except:
+                print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
+        except KeyError:
+            print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
+            sys.exit(1)
 
-    print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(
-        project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
-    logging.info(json.dumps(project_conf))
+        print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(
+            project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+        logging.info(json.dumps(project_conf))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+        sys.exit(1)
 
     try:
         logging.info('[CREATE SUBNET]')
         print('[CREATE SUBNET]')
-        params = "--subnet_name {} --region {} --vpc_selflink {} --prefix {} --vpc_cidr {}" \
+        params = "--subnet_name {} --region {} --vpc_selflink {} --prefix {} --vpc_cidr {} --user_subnets_range '{}'" \
                  .format(project_conf['private_subnet_name'], project_conf['region'], project_conf['vpc_selflink'],
-                         project_conf['private_subnet_prefix'], project_conf['vpc_cidr'])
+                         project_conf['private_subnet_prefix'], project_conf['vpc_cidr'],
+                         project_conf['user_subnets_range'])
         try:
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
-            project_conf['private_subnet_cidr'] = GCPMeta().get_subnet(project_conf['private_subnet_name'],
-                                                                    project_conf['region'])['ipCidrRange']
+            project_conf['private_subnet_cidr'] = GCPMeta.get_subnet(project_conf['private_subnet_name'],
+                                                                     project_conf['region'])['ipCidrRange']
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+            GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         except:
             print("Subnet hasn't been created.")
-        append_result("Failed to create subnet.", str(err))
+        dlab.fab.append_result("Failed to create subnet.", str(err))
         sys.exit(1)
 
     print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
@@ -143,8 +174,9 @@
     try:
         logging.info('[CREATE SERVICE ACCOUNT AND ROLE FOR EDGE NODE]')
         print('[CREATE SERVICE ACCOUNT AND ROLE FOR EDGE NODE]')
-        params = "--service_account_name {} --role_name {}".format(project_conf['edge_service_account_name'],
-                                                                   project_conf['edge_role_name'])
+        params = "--service_account_name {} --role_name {} --unique_index {} --service_base_name {}".format(
+            project_conf['edge_service_account_name'], project_conf['edge_role_name'],
+            project_conf['edge_unique_index'], project_conf['service_base_name'])
 
         try:
             local("~/scripts/{}.py {}".format('common_create_service_account', params))
@@ -152,22 +184,23 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-            GCPActions().remove_role(project_conf['edge_role_name'])
+            GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                              project_conf['service_base_name'])
+            GCPActions.remove_role(project_conf['edge_role_name'])
         except:
             print("Service account or role hasn't been created")
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
-        append_result("Failed to creating service account and role.", str(err))
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to creating service account and role.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATE SERVICE ACCOUNT AND ROLE FOR PRIVATE SUBNET]')
         print('[CREATE SERVICE ACCOUNT AND ROLE FOR NOTEBOOK NODE]')
-        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {}".format(
-            project_conf['ps_service_account_name'], project_conf['ps_role_name'],
-            project_conf['ps_policy_path'], project_conf['ps_roles_path'])
+        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} " \
+                 "--service_base_name {}".format(
+                  project_conf['ps_service_account_name'], project_conf['ps_role_name'], project_conf['ps_policy_path'],
+                  project_conf['ps_roles_path'], project_conf['ps_unique_index'], project_conf['service_base_name'])
 
         try:
             local("~/scripts/{}.py {}".format('common_create_service_account', params))
@@ -175,16 +208,17 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            GCPActions().remove_service_account(project_conf['ps_service_account_name'])
-            GCPActions().remove_role(project_conf['ps_role_name'])
+            GCPActions.remove_service_account(project_conf['ps_service_account_name'],
+                                              project_conf['service_base_name'])
+            GCPActions.remove_role(project_conf['ps_role_name'])
         except:
             print("Service account or role hasn't been created")
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
-        append_result("Failed to creating service account and role.", str(err))
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to creating service account and role.", str(err))
         sys.exit(1)
 
     try:
@@ -202,7 +236,7 @@
         rules = [
             {
                 'IPProtocol': 'tcp',
-                'ports': ['22', '80', '3128']
+                'ports': ['22', '80', '443', '3128']
             }
         ]
         ingress_rule['allowed'] = rules
@@ -266,13 +300,13 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        append_result("Failed to create firewall for Edge node.", str(err))
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        dlab.fab.append_result("Failed to create firewall for Edge node.", str(err))
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
@@ -288,8 +322,8 @@
             project_conf['ps_firewall_target']
         ]
         ingress_rule['sourceRanges'] = [project_conf['private_subnet_cidr'],
-                                        GCPMeta().get_subnet(project_conf['subnet_name'],
-                                                             project_conf['region'])['ipCidrRange']
+                                        GCPMeta.get_subnet(project_conf['subnet_name'],
+                                                           project_conf['region'])['ipCidrRange']
                                         ]
         rules = [
             {
@@ -307,8 +341,8 @@
             project_conf['ps_firewall_target']
         ]
         egress_rule['destinationRanges'] = [project_conf['private_subnet_cidr'],
-                                            GCPMeta().get_subnet(project_conf['subnet_name'],
-                                                                 project_conf['region'])['ipCidrRange']
+                                            GCPMeta.get_subnet(project_conf['subnet_name'],
+                                                               project_conf['region'])['ipCidrRange']
                                             ]
         rules = [
             {
@@ -344,23 +378,45 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create firewall for private subnet.", str(err))
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to create firewall for private subnet.", str(err))
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
         logging.info('[CREATE BUCKETS]')
         print('[CREATE BUCKETS]')
-        params = "--bucket_name {}".format(project_conf['bucket_name'])
+        project_conf['shared_bucket_tags'] = {
+            project_conf['tag_name']: project_conf['shared_bucket_name'],
+            "endpoint_tag": project_conf['endpoint_tag'],
+            os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
+            "sbn": project_conf['service_base_name'],
+            "name": project_conf['shared_bucket_name']}
+        params = "--bucket_name {} --tags '{}'".format(project_conf['shared_bucket_name'],
+                                                       json.dumps(project_conf['shared_bucket_tags']))
+        try:
+            local("~/scripts/{}.py {}".format('common_create_bucket', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+
+        project_conf['bucket_tags'] = {
+            project_conf['tag_name']: project_conf['bucket_name'],
+            "endpoint_tag": project_conf['endpoint_tag'],
+            os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
+            "sbn": project_conf['service_base_name'],
+            "project_tag": project_conf['project_tag'],
+            "name": project_conf['bucket_name']}
+        params = "--bucket_name {} --tags '{}'".format(project_conf['bucket_name'],
+                                                       json.dumps(project_conf['bucket_tags']))
 
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
@@ -368,43 +424,45 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create bucket.", str(err))
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Unable to create bucket.", str(err))
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
         logging.info('[SET PERMISSIONS FOR USER AND SHARED BUCKETS]')
         print('[SET PERMISSIONS FOR USER AND SHARED BUCKETS]')
-        GCPActions().set_bucket_owner(project_conf['bucket_name'], project_conf['ps_service_account_name'])
-        GCPActions().set_bucket_owner(project_conf['shared_bucket_name'], project_conf['ps_service_account_name'])
+        GCPActions.set_bucket_owner(project_conf['bucket_name'], project_conf['ps_service_account_name'],
+                                    project_conf['service_base_name'])
+        GCPActions.set_bucket_owner(project_conf['shared_bucket_name'], project_conf['ps_service_account_name'],
+                                    project_conf['service_base_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set bucket permissions.", str(err))
-        GCPActions().remove_bucket(project_conf['bucket_name'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to set bucket permissions.", str(err))
+        GCPActions.remove_bucket(project_conf['bucket_name'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
@@ -417,64 +475,67 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create static ip.", str(err))
+        dlab.fab.append_result("Failed to create static ip.", str(err))
         try:
-            GCPActions().remove_static_address(project_conf['static_address_name'], project_conf['region'])
+            GCPActions.remove_static_address(project_conf['static_address_name'], project_conf['region'])
         except:
             print("Static IP address hasn't been created.")
-        GCPActions().remove_bucket(project_conf['bucket_name'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        GCPActions.remove_bucket(project_conf['bucket_name'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
+        project_conf['initial_user'] = 'ubuntu'
+        project_conf['sudo_group'] = 'sudo'
     if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
+        project_conf['initial_user'] = 'ec2-user'
+        project_conf['sudo_group'] = 'wheel'
 
     try:
         project_conf['static_ip'] = \
-            GCPMeta().get_static_address(project_conf['region'], project_conf['static_address_name'])['address']
+            GCPMeta.get_static_address(project_conf['region'], project_conf['static_address_name'])['address']
         logging.info('[CREATE EDGE INSTANCE]')
         print('[CREATE EDGE INSTANCE]')
-        params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} --ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} --instance_class {} --static_ip {} --network_tag {} --labels '{}'".\
-            format(project_conf['instance_name'], project_conf['region'], project_conf['zone'], project_conf['vpc_name'],
-                   project_conf['subnet_name'], project_conf['instance_size'], project_conf['ssh_key_path'], initial_user,
-                   project_conf['edge_service_account_name'], project_conf['image_name'], 'edge', project_conf['static_ip'],
-                   project_conf['network_tag'], json.dumps(project_conf['instance_labels']))
+        params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} " \
+                 "--ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} --instance_class {} " \
+                 "--static_ip {} --network_tag {} --labels '{}' --service_base_name {}".format(
+                  project_conf['instance_name'], project_conf['region'], project_conf['zone'], project_conf['vpc_name'],
+                  project_conf['subnet_name'], project_conf['instance_size'], project_conf['ssh_key_path'],
+                  project_conf['initial_user'], project_conf['edge_service_account_name'], project_conf['image_name'],
+                  'edge', project_conf['static_ip'], project_conf['network_tag'],
+                  json.dumps(project_conf['instance_labels']), project_conf['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        GCPActions().remove_static_address(project_conf['static_address_name'], project_conf['region'])
-        GCPActions().remove_bucket(project_conf['bucket_name'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        GCPActions.remove_static_address(project_conf['static_address_name'], project_conf['region'])
+        GCPActions.remove_bucket(project_conf['bucket_name'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        sys.exit(1)
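Note on the bucket-creation step above: the tags for the shared and project buckets are now built as Python dicts, serialized with json.dumps, and handed to common_create_bucket as a single quoted --tags argument. A minimal sketch of the receiving side, assuming the helper parses the argument back with argparse and json.loads (illustrative only; the actual common_create_bucket.py may differ):

    import argparse
    import json

    parser = argparse.ArgumentParser()
    parser.add_argument('--bucket_name', type=str, required=True)
    parser.add_argument('--tags', type=str, default='{}')  # JSON object passed as one quoted CLI argument
    args = parser.parse_args()

    # Recovers the labels exactly as the caller built them, e.g.
    # {"sbn": "dlab", "endpoint_tag": "endpoint1", "name": "dlab-prj-ep-bucket"}
    tags = json.loads(args.tags)
    print('Creating bucket {} with labels {}'.format(args.bucket_name, tags))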
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
index 280f3c3..96c021d 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
@@ -22,119 +22,142 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import logging
+import traceback
+import requests
 
 
-def terminate_edge_node(project_name, service_base_name, region, zone):
+def terminate_edge_node(endpoint_name, project_name, service_base_name, region, zone):
     print("Terminating Dataengine-service clusters")
     try:
         labels = [
             {'sbn': service_base_name},
             {'project_tag': project_name}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine-service", str(err))
         sys.exit(1)
 
     print("Terminating EDGE and notebook instances")
-    base = '{}-{}'.format(service_base_name, project_name)
-    keys = ['edge', 'ps', 'ip', 'bucket', 'subnet']
+    base = '{}-{}-{}'.format(service_base_name, project_name, endpoint_name)
+    keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
     targets = ['{}-{}'.format(base, k) for k in keys]
     try:
-        instances = GCPMeta().get_list_instances(zone, base)
+        instances = GCPMeta.get_list_instances(zone, base)
         if 'items' in instances:
             for i in instances['items']:
                 if 'project_tag' in i['labels'] and project_name == i['labels']['project_tag']:
-                    GCPActions().remove_instance(i['name'], zone)
+                    GCPActions.remove_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instances", str(err))
         sys.exit(1)
 
     print("Removing static addresses")
     try:
-        static_addresses = GCPMeta().get_list_static_addresses(region, base)
+        static_addresses = GCPMeta.get_list_static_addresses(region, base)
         if 'items' in static_addresses:
             for i in static_addresses['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_static_address(i['name'], region)
+                    GCPActions.remove_static_address(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static addresses", str(err))
         sys.exit(1)
 
     print("Removing storage bucket")
     try:
-        buckets = GCPMeta().get_list_buckets(base)
+        buckets = GCPMeta.get_list_buckets(base)
         if 'items' in buckets:
             for i in buckets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_bucket(i['name'])
+                    GCPActions.remove_bucket(i['name'])
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove storage buckets", str(err))
+        sys.exit(1)
+
+    print("Removing project specific images")
+    try:
+        project_image_name_beginning = '{}-{}'.format(service_base_name, project_name)
+        images = GCPMeta.get_list_images(project_image_name_beginning)
+        if 'items' in images:
+            for i in images['items']:
+                GCPActions.remove_image(i['name'])
     except Exception as err:
         print('Error: {0}'.format(err))
         sys.exit(1)
 
     print("Removing firewalls")
     try:
-        firewalls = GCPMeta().get_list_firewalls(base)
+        firewalls = GCPMeta.get_list_firewalls(base)
         if 'items' in firewalls:
             for i in firewalls['items']:
                 if bool(set(targets) & set(i['targetTags'])):
-                    GCPActions().remove_firewall(i['name'])
+                    GCPActions.remove_firewall(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing Service accounts and roles")
     try:
-        list_service_accounts = GCPMeta().get_list_service_accounts()
-        for service_account in (set(targets) & set(list_service_accounts)):
-            if service_account.startswith(service_base_name):
-                GCPActions().remove_service_account(service_account)
-        list_roles_names = GCPMeta().get_list_roles()
-        for role in (set(targets) & set(list_roles_names)):
-            if role.startswith(service_base_name):
-                GCPActions().remove_role(role)
+        list_service_accounts = GCPMeta.get_list_service_accounts()
+        sa_keys = ['edge-sa', 'ps-sa']
+        role_keys = ['edge-role', 'ps-role']
+        sa_target = ['{}-{}'.format(base, k) for k in sa_keys]
+        indexes = [GCPMeta.get_index_by_service_account_name('{}-{}'.format(base, k)) for k in sa_keys]
+        role_targets = ['{}-{}-{}'.format(base, i, k) for k in role_keys for i in indexes]
+        for service_account in (set(sa_target) & set(list_service_accounts)):
+            GCPActions.remove_service_account(service_account, service_base_name)
+        list_roles_names = GCPMeta.get_list_roles()
+        for role in (set(role_targets) & set(list_roles_names)):
+            GCPActions.remove_role(role)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove service accounts and roles", str(err))
         sys.exit(1)
 
     print("Removing subnets")
     try:
-        list_subnets = GCPMeta().get_list_subnetworks(region, '', base)
+        list_subnets = GCPMeta.get_list_subnetworks(region, '', base)
         if 'items' in list_subnets:
             vpc_selflink = list_subnets['items'][0]['network']
             vpc_name = vpc_selflink.split('/')[-1]
-            subnets = GCPMeta().get_list_subnetworks(region, vpc_name, base)
+            subnets = GCPMeta.get_list_subnetworks(region, vpc_name, base)
             for i in subnets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_subnet(i['name'], region)
+                    GCPActions.remove_subnet(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnets", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/project/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     project_conf = dict()
-    project_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
+    project_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+    project_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    project_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+    project_conf['project_tag'] = project_conf['project_name']
     project_conf['region'] = os.environ['gcp_region']
     project_conf['zone'] = os.environ['gcp_zone']
 
@@ -142,22 +165,61 @@
         logging.info('[TERMINATE EDGE]')
         print('[TERMINATE EDGE]')
         try:
-            terminate_edge_node(project_conf['project_name'], project_conf['service_base_name'],
+            terminate_edge_node(project_conf['endpoint_name'], project_conf['project_name'],
+                                project_conf['service_base_name'],
                                 project_conf['region'], project_conf['zone'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
+            dlab.fab.append_result("Failed to terminate edge.", str(err))
     except Exception as err:
         print('Error: {0}'.format(err))
         sys.exit(1)
 
     try:
+        print('[KEYCLOAK PROJECT CLIENT DELETE]')
+        logging.info('[KEYCLOAK PROJECT CLIENT DELETE]')
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            os.environ['keycloak_auth_server_url'])
+        keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+                                                                    os.environ['keycloak_realm_name'])
+
+        keycloak_auth_data = {
+            "username": os.environ['keycloak_user'],
+            "password": os.environ['keycloak_user_password'],
+            "grant_type": "password",
+            "client_id": "admin-cli",
+        }
+
+        client_params = {
+            "clientId": "{}-{}-{}".format(project_conf['service_base_name'], project_conf['project_name'],
+                                          project_conf['endpoint_name'])
+        }
+
+        keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
+
+        keycloak_get_id_client = requests.get(keycloak_client_url, params=client_params,
+                                              headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
+                                                       "Content-Type": "application/json"})
+        json_keycloak_client_id = json.loads(keycloak_get_id_client.text)
+        keycloak_id_client = json_keycloak_client_id[0]['id']
+
+        keycloak_client_delete_url = '{0}/admin/realms/{1}/clients/{2}'.format(os.environ['keycloak_auth_server_url'],
+                                                                               os.environ['keycloak_realm_name'],
+                                                                               keycloak_id_client)
+
+        keycloak_client = requests.delete(keycloak_client_delete_url,
+                                          headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
+                                                   "Content-Type": "application/json"})
+    except Exception as err:
+        print("Failed to remove project client from Keycloak", str(err))
+
+    try:
         with open("/root/result.json", 'w') as result:
-            res = {"service_base_name": edge_conf['service_base_name'],
-                   "project_name": edge_conf['project_name'],
+            res = {"service_base_name": project_conf['service_base_name'],
+                   "project_name": project_conf['project_name'],
                    "Action": "Terminate project"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
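The Keycloak cleanup added to project_terminate.py follows the standard Keycloak admin REST flow: obtain an access token from the master realm via the password grant and the admin-cli client, resolve the client's internal id by its clientId, then issue a DELETE. A condensed, self-contained sketch of the same flow (the base URL, realm, credentials, and clientId below are placeholders, not values from this patch):

    import requests

    base_url = 'https://keycloak.example.com/auth'  # placeholder for keycloak_auth_server_url
    realm = 'dlab'                                  # placeholder for keycloak_realm_name

    token = requests.post(
        '{}/realms/master/protocol/openid-connect/token'.format(base_url),
        data={'username': 'admin', 'password': 'secret',
              'grant_type': 'password', 'client_id': 'admin-cli'}).json()
    headers = {'Authorization': 'Bearer ' + token['access_token'],
               'Content-Type': 'application/json'}

    # Keycloak returns a list of clients matching the given clientId.
    clients = requests.get('{}/admin/realms/{}/clients'.format(base_url, realm),
                           params={'clientId': 'sbn-project-endpoint'},  # placeholder
                           headers=headers).json()
    if clients:
        requests.delete('{}/admin/realms/{}/clients/{}'.format(base_url, realm, clients[0]['id']),
                        headers=headers)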
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
index 00ac3fa..6ea7a90 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -38,68 +40,78 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['ip_address'] = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    notebook_config['rstudio_pass'] = id_generator()
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        notebook_config['ip_address'] = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -107,9 +119,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -127,9 +138,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -145,9 +155,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring RStudio and all dependencies
@@ -157,7 +166,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
             .format(instance_hostname, notebook_config['ssh_key_path'],
                     os.environ['gcp_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -169,9 +178,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure RStudio.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure RStudio.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -180,16 +188,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -201,35 +209,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
         
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -254,54 +261,58 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    rstudio_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "RStudio",
-                    "url": rstudio_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_acces_url}#,
-                   #{"description": "RStudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "RStudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "RStudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
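A refactor repeated throughout these scripts: GCPMeta() and GCPActions() used to be instantiated on every call; they are now created once near the top of __main__ and reused, so a single authenticated API client serves the whole run. A minimal sketch of the pattern, with placeholder argument values:

    import dlab.meta_lib
    import dlab.actions_lib

    if __name__ == "__main__":
        GCPMeta = dlab.meta_lib.GCPMeta()          # one authenticated client per run
        GCPActions = dlab.actions_lib.GCPActions()

        # Subsequent calls reuse the same objects instead of re-authenticating:
        subnet = GCPMeta.get_subnet('subnet-name', 'us-west1')   # placeholder args
        GCPActions.remove_subnet('subnet-name', 'us-west1')      # placeholder args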
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
index 5ce4f63..dd622d2 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
@@ -21,14 +21,20 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
 import sys, os
 from fabric.api import *
-from dlab.ssn_lib import *
 import traceback
 import json
+import argparse
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--ssn_unique_index', type=str, default='')
+args = parser.parse_args()
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -36,52 +42,58 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
+
+    def clear_resources():
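+        # Roll back everything created for the SSN node; pre-defined VPC/subnet/firewall are left in place.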
+        GCPActions.remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
+        GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+        GCPActions.remove_role(ssn_conf['role_name'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
 
     try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
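+        # Shared GCP metadata/actions clients used for all cloud calls in this script.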
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-        pre_defined_vpc = False
-        pre_defined_subnet = False
-        pre_defined_firewall = False
-        billing_enabled = True
-
         ssn_conf = dict()
-        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
+        ssn_conf['instance'] = 'ssn'
+        ssn_conf['pre_defined_vpc'] = False
+        ssn_conf['pre_defined_subnet'] = False
+        ssn_conf['pre_defined_firewall'] = False
+        ssn_conf['billing_enabled'] = True
+
+        ssn_conf['ssn_unique_index'] = args.ssn_unique_index
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
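+        # Normalize the base name for GCP: lowercase, dashes instead of underscores, capped at 20 characters.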
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['role_name'] = '{}-{}-ssn-role'.format(ssn_conf['service_base_name'], ssn_conf['ssn_unique_index'])
         ssn_conf['region'] = os.environ['gcp_region']
         ssn_conf['zone'] = os.environ['gcp_zone']
-        ssn_conf['ssn_bucket_name'] = '{}-ssn-bucket'.format(ssn_conf['service_base_name'])
         ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
-        ssn_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(ssn_conf['service_base_name'],
-                                                                        ssn_conf['default_endpoint_name'])
-        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
         ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
-        ssn_conf['subnet_cidr'] = '10.10.1.0/24'
-        ssn_conf['firewall_name'] = '{}-ssn-firewall'.format(ssn_conf['service_base_name'])
-        ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-        ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
-        ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-        ssn_conf['role_name'] = ssn_conf['service_base_name'] + '-ssn-role'
-
         try:
             if os.environ['gcp_vpc_name'] == '':
                 raise KeyError
             else:
-                pre_defined_vpc = True
+                ssn_conf['pre_defined_vpc'] = True
                 ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
         except KeyError:
-            ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
+            ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
 
         try:
             if os.environ['gcp_subnet_name'] == '':
                 raise KeyError
             else:
-                pre_defined_subnet = True
+                ssn_conf['pre_defined_subnet'] = True
                 ssn_conf['subnet_name'] = os.environ['gcp_subnet_name']
         except KeyError:
-            ssn_conf['subnet_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
+            ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
         try:
             if os.environ['gcp_firewall_name'] == '':
                 raise KeyError
@@ -89,7 +101,11 @@
-                pre_defined_firewall = True
+                ssn_conf['pre_defined_firewall'] = True
                 ssn_conf['firewall_name'] = os.environ['gcp_firewall_name']
         except KeyError:
-            ssn_conf['firewall_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
+            ssn_conf['firewall_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+        ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
+        ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
 
         try:
             if os.environ['aws_account_id'] == '':
@@ -97,41 +113,36 @@
             if os.environ['aws_billing_bucket'] == '':
                 raise KeyError
         except KeyError:
-            billing_enabled = False
-        if not billing_enabled:
+            ssn_conf['billing_enabled'] = False
+        if not ssn_conf['billing_enabled']:
             os.environ['aws_account_id'] = 'None'
             os.environ['aws_billing_bucket'] = 'None'
             os.environ['aws_report_path'] = 'None'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed deriving names.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed deriving names.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        instance_hostname = GCPMeta().get_instance_public_ip_by_name(ssn_conf['instance_name'])
+        ssn_conf['instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(ssn_conf['instance_name'])
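+        # With step CA enabled, both the public and the private IP are added as SANs for the certificate.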
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            ssn_conf['step_cert_sans'] = ' --san {0} --san {1}'.format(GCPMeta.get_instance_public_ip_by_name(
+                ssn_conf['instance_name']), dlab.meta_lib.get_instance_private_ip_address('ssn',
+                                                                                          ssn_conf['instance_name']))
+        else:
+            ssn_conf['step_cert_sans'] = ''
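+        # Stock cloud images differ per OS family: Debian ships 'ubuntu'/sudo, RedHat ships 'ec2-user'/wheel.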
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            ssn_conf['initial_user'] = 'ubuntu'
+            ssn_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            ssn_conf['initial_user'] = 'ec2-user'
+            ssn_conf['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, ssn_conf['ssh_key_path'], initial_user, ssn_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], ssn_conf['initial_user'],
+            ssn_conf['dlab_ssh_user'], ssn_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -139,20 +150,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -161,7 +160,7 @@
         params = "--hostname {} --keyfile {} --pip_packages " \
                  "'boto3 backoff argparse fabric==1.14.0 awscli pymongo pyyaml " \
                  "google-api-python-client google-cloud-storage pycrypto' --user {} --region {}". \
-            format(instance_hostname, ssn_conf['ssh_key_path'],
+            format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'],
                    ssn_conf['dlab_ssh_user'], ssn_conf['region'])
 
         try:
@@ -170,20 +169,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing software: pip, packages.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -193,9 +180,11 @@
                              "service_base_name": ssn_conf['service_base_name'],
                              "security_group_id": ssn_conf['firewall_name'], "vpc_id": ssn_conf['vpc_name'],
                              "subnet_id": ssn_conf['subnet_name'], "admin_key": os.environ['conf_key_name']}
-        params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} --tag_resource_id {}". \
-            format(instance_hostname, ssn_conf['ssh_key_path'], json.dumps(additional_config),
-                   ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
+                 "--tag_resource_id {} --step_cert_sans '{}'". \
+            format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
+                   ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'],
+                   ssn_conf['step_cert_sans'])
 
         try:
             local("~/scripts/{}.py {}".format('configure_ssn_node', params))
@@ -203,20 +192,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed configuring ssn.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed configuring ssn.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -226,15 +203,18 @@
                              {"name": "project", "tag": "latest"},
                              {"name": "edge", "tag": "latest"},
                              {"name": "jupyter", "tag": "latest"},
+                             {"name": "jupyterlab", "tag": "latest"},
                              {"name": "rstudio", "tag": "latest"},
                              {"name": "zeppelin", "tag": "latest"},
+                             {"name": "superset", "tag": "latest"},
                              {"name": "tensor", "tag": "latest"},
                              {"name": "tensor-rstudio", "tag": "latest"},
                              {"name": "deeplearning", "tag": "latest"},
                              {"name": "dataengine", "tag": "latest"},
                              {"name": "dataengine-service", "tag": "latest"}]
-        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} --cloud_provider {} --region {}". \
-            format(instance_hostname, ssn_conf['ssh_key_path'], json.dumps(additional_config),
+        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
+                 "--cloud_provider {} --region {}". \
+            format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
                    os.environ['conf_os_family'], ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
                    os.environ['conf_cloud_provider'], ssn_conf['region'])
 
@@ -244,20 +224,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to configure docker.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Unable to configure docker.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -267,7 +235,7 @@
         cloud_params = [
             {
                 'key': 'KEYCLOAK_REDIRECT_URI',
-                'value': "http://{0}/".format(instance_hostname)
+                'value': "https://{0}/".format(ssn_conf['instance_hostname'])
             },
             {
                 'key': 'KEYCLOAK_REALM_NAME',
@@ -286,6 +254,14 @@
                 'value': os.environ['keycloak_client_secret']
             },
             {
+                'key': 'KEYCLOAK_USER_NAME',
+                'value': os.environ['keycloak_user']
+            },
+            {
+                'key': 'KEYCLOAK_PASSWORD',
+                'value': os.environ['keycloak_user_password']
+            },
+            {
                 'key': 'CONF_OS',
                 'value': os.environ['conf_os_family']
             },
@@ -390,39 +366,80 @@
                 'value': os.environ['conf_image_enabled']
             },
             {
-                'key': 'SHARED_IMAGE_ENABLED',
-                'value': os.environ['conf_shared_image_enabled']
+                'key': "AZURE_AUTH_FILE_PATH",
+                'value': ""
             }
         ]
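+        # Step-CA settings are always present in cloud_params; explicit empty values signal that step certs are disabled.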
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            cloud_params.append(
+                {
+                    'key': 'STEP_CERTS_ENABLED',
+                    'value': os.environ['conf_stepcerts_enabled']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_ROOT_CA',
+                    'value': os.environ['conf_stepcerts_root_ca']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_ID',
+                    'value': os.environ['conf_stepcerts_kid']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_PASSWORD',
+                    'value': os.environ['conf_stepcerts_kid_password']
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_CA_URL',
+                    'value': os.environ['conf_stepcerts_ca_url']
+                })
+        else:
+            cloud_params.append(
+                {
+                    'key': 'STEP_CERTS_ENABLED',
+                    'value': 'false'
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_ROOT_CA',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_ID',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_KID_PASSWORD',
+                    'value': ''
+                })
+            cloud_params.append(
+                {
+                    'key': 'STEP_CA_URL',
+                    'value': ''
+                })
         params = "--hostname {} --keyfile {} --dlab_path {} --os_user {} --os_family {} --billing_enabled {} " \
                  "--request_id {} --billing_dataset_name {} \
                  --resource {} --service_base_name {} --cloud_provider {} --default_endpoint_name {} " \
-                 "--cloud_params '{}'". \
-            format(instance_hostname, ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'], ssn_conf['dlab_ssh_user'],
-                   os.environ['conf_os_family'], billing_enabled, os.environ['request_id'],
+                 "--cloud_params '{}' --keycloak_client_id {} --keycloak_client_secret {} --keycloak_auth_server_url {}". \
+            format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'], ssn_conf['dlab_ssh_user'],
+                   os.environ['conf_os_family'], ssn_conf['billing_enabled'], os.environ['request_id'],
                    os.environ['billing_dataset_name'], os.environ['conf_resource'],
                    ssn_conf['service_base_name'], os.environ['conf_cloud_provider'], ssn_conf['default_endpoint_name'],
-                   json.dumps(cloud_params))
+                   json.dumps(cloud_params), os.environ['keycloak_client_name'], os.environ['keycloak_client_secret'],
+                   os.environ['keycloak_auth_server_url'])
         try:
             local("~/scripts/{}.py {}".format('configure_ui', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to configure UI.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Unable to configure UI.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -430,7 +447,7 @@
         print('[SUMMARY]')
         print("Service base name: {}".format(ssn_conf['service_base_name']))
         print("SSN Name: {}".format(ssn_conf['instance_name']))
-        print("SSN Hostname: {}".format(instance_hostname))
+        print("SSN Hostname: {}".format(ssn_conf['instance_hostname']))
         print("Role name: {}".format(ssn_conf['role_name']))
         print("Key name: {}".format(os.environ['conf_key_name']))
         print("VPC Name: {}".format(ssn_conf['vpc_name']))
@@ -438,14 +455,13 @@
         print("Firewall Names: {}".format(ssn_conf['firewall_name']))
         print("SSN instance size: {}".format(ssn_conf['instance_size']))
         print("SSN AMI name: {}".format(ssn_conf['image_name']))
-        print("SSN bucket name: {}".format(ssn_conf['ssn_bucket_name']))
         print("Region: {}".format(ssn_conf['region']))
-        jenkins_url = "http://{}/jenkins".format(instance_hostname)
-        jenkins_url_https = "https://{}/jenkins".format(instance_hostname)
+        jenkins_url = "http://{}/jenkins".format(ssn_conf['instance_hostname'])
+        jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instance_hostname'])
         print("Jenkins URL: {}".format(jenkins_url))
         print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
-        print("DLab UI HTTP URL: http://{}".format(instance_hostname))
-        print("DLab UI HTTPS URL: https://{}".format(instance_hostname))
+        print("DLab UI HTTP URL: http://{}".format(ssn_conf['instance_hostname']))
+        print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instance_hostname']))
         try:
             with open('jenkins_creds.txt') as f:
                 print(f.read())
@@ -455,37 +471,23 @@
         with open("/root/result.json", 'w') as f:
             res = {"service_base_name": ssn_conf['service_base_name'],
                    "instance_name": ssn_conf['instance_name'],
-                   "instance_hostname": instance_hostname,
+                   "instance_hostname": ssn_conf['instance_hostname'],
                    "role_name": ssn_conf['role_name'],
-                   #"role_profile_name": role_profile_name,
-                   #"policy_name": policy_name,
                    "master_keyname": os.environ['conf_key_name'],
                    "vpc_id": ssn_conf['vpc_name'],
                    "subnet_id": ssn_conf['subnet_name'],
                    "security_id": ssn_conf['firewall_name'],
                    "instance_shape": ssn_conf['instance_size'],
-                   "bucket_name": ssn_conf['ssn_bucket_name'],
-                   "shared_bucket_name": ssn_conf['shared_bucket_name'],
                    "region": ssn_conf['region'],
                    "action": "Create SSN instance"}
             f.write(json.dumps(res))
 
         print('Upload response file')
         params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
-            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'], instance_hostname)
+            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'],
+                   ssn_conf['instance_hostname'])
         local("~/scripts/{}.py {}".format('upload_response_file', params))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
index be55481..8cf209d 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
@@ -21,68 +21,78 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
 import sys, os
 from fabric.api import *
-from dlab.ssn_lib import *
 import json
+import argparse
+import logging
+import traceback
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 
+parser = argparse.ArgumentParser()
+parser.add_argument('--ssn_unique_index', type=str, default='')
+args = parser.parse_args()
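+# ssn_unique_index is appended to the SSN role name to keep it unique per deployment.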
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
-    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
-    pre_defined_vpc = False
-    pre_defined_subnet = False
-    pre_defined_firewall = False
-    logging.info('[DERIVING NAMES]')
-    print('[DERIVING NAMES]')
-    ssn_conf = dict()
-    ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
-    ssn_conf['region'] = os.environ['gcp_region']
-    ssn_conf['zone'] = os.environ['gcp_zone']
-    ssn_conf['ssn_bucket_name'] = '{}-ssn-bucket'.format(ssn_conf['service_base_name'])
-    ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
-    ssn_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(ssn_conf['service_base_name'],
-                                                                    ssn_conf['default_endpoint_name'])
-    ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
-    ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
-    ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
-    ssn_conf['subnet_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
-    ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-    ssn_conf['subnet_prefix'] = '20'
-    ssn_conf['firewall_name'] = '{}-ssn-firewall'.format(ssn_conf['service_base_name'])
-    ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
-    ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    ssn_conf['role_name'] = ssn_conf['service_base_name'] + '-ssn-role'
-    ssn_conf['static_address_name'] = '{}-ssn-ip'.format(ssn_conf['service_base_name'])
-    ssn_conf['ssn_policy_path'] = '/root/files/ssn_policy.json'
-    ssn_conf['ssn_roles_path'] = '/root/files/ssn_roles.json'
-    ssn_conf['network_tag'] = ssn_conf['instance_name']
-    ssn_conf['instance_labels'] = {"name": ssn_conf['instance_name'],
-                                   "sbn": ssn_conf['service_base_name'],
-                                   os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-    ssn_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+    try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        ssn_conf = dict()
+        ssn_conf['instance'] = 'ssn'
+        ssn_conf['pre_defined_vpc'] = False
+        ssn_conf['pre_defined_subnet'] = False
+        ssn_conf['pre_defined_firewall'] = False
+        logging.info('[DERIVING NAMES]')
+        print('[DERIVING NAMES]')
+        ssn_conf['ssn_unique_index'] = args.ssn_unique_index
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+                os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
+        ssn_conf['region'] = os.environ['gcp_region']
+        ssn_conf['zone'] = os.environ['gcp_zone']
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
+        ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
+        ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+        ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        ssn_conf['subnet_prefix'] = '20'
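+        # The SSN subnet is carved out of the VPC CIDR with a /20 prefix.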
+        ssn_conf['firewall_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+        ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
+        ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        ssn_conf['role_name'] = '{}-{}-ssn-role'.format(ssn_conf['service_base_name'], ssn_conf['ssn_unique_index'])
+        ssn_conf['static_address_name'] = '{}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+        ssn_conf['ssn_policy_path'] = '/root/files/ssn_policy.json'
+        ssn_conf['ssn_roles_path'] = '/root/files/ssn_roles.json'
+        ssn_conf['network_tag'] = ssn_conf['instance_name']
+        ssn_conf['instance_labels'] = {"name": ssn_conf['instance_name'],
+                                       "sbn": ssn_conf['service_base_name'],
+                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        ssn_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+    except Exception as err:
+        dlab.fab.dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
 
-    if GCPMeta().get_instance(ssn_conf['instance_name']):
-        print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+    if GCPMeta.get_instance(ssn_conf['instance_name']):
+        dlab.fab.dlab.fab.append_result("Service base name should be unique and less or equal 20 symbols. "
+                                        "Please try again.")
         sys.exit(1)
 
     try:
         if os.environ['gcp_vpc_name'] == '':
             raise KeyError
         else:
+            ssn_conf['pre_defined_vpc'] = True
             ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
     except KeyError:
         try:
-            pre_defined_vpc = True
             logging.info('[CREATE VPC]')
             print('[CREATE VPC]')
             params = "--vpc_name {}".format(ssn_conf['vpc_name'])
@@ -93,29 +103,28 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create VPC. Exception:" + str(err))
-            if pre_defined_vpc:
+            dlab.fab.append_result("Failed to create VPC.", str(err))
+            if not ssn_conf['pre_defined_vpc']:
                 try:
-                    GCPActions().remove_vpc(ssn_conf['vpc_name'])
+                    GCPActions.remove_vpc(ssn_conf['vpc_name'])
                 except:
                     print("VPC hasn't been created.")
             sys.exit(1)
 
     try:
-        ssn_conf['vpc_selflink'] = GCPMeta().get_vpc(ssn_conf['vpc_name'])['selfLink']
+        ssn_conf['vpc_selflink'] = GCPMeta.get_vpc(ssn_conf['vpc_name'])['selfLink']
         if os.environ['gcp_subnet_name'] == '':
             raise KeyError
         else:
+            ssn_conf['pre_defined_subnet'] = True
             ssn_conf['subnet_name'] = os.environ['gcp_subnet_name']
     except KeyError:
         try:
-            pre_defined_subnet = True
             logging.info('[CREATE SUBNET]')
             print('[CREATE SUBNET]')
-            params = "--subnet_name {} --region {} --vpc_selflink {} --prefix {} --vpc_cidr {}".\
+            params = "--subnet_name {} --region {} --vpc_selflink {} --prefix {} --vpc_cidr {} --ssn {}".\
                 format(ssn_conf['subnet_name'], ssn_conf['region'], ssn_conf['vpc_selflink'], ssn_conf['subnet_prefix'],
-                       ssn_conf['vpc_cidr'])
+                       ssn_conf['vpc_cidr'], True)
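+            # The trailing True fills the new --ssn flag, marking this as the SSN subnet for common_create_subnet.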
             try:
                 local("~/scripts/{}.py {}".format('common_create_subnet', params))
                 os.environ['gcp_subnet_name'] = ssn_conf['subnet_name']
@@ -123,14 +132,14 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create Subnet.", str(err))
-            if pre_defined_vpc:
+            dlab.fab.append_result("Failed to create Subnet.", str(err))
+            if not ssn_conf['pre_defined_subnet']:
                 try:
-                    GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+                    GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
                 except:
                     print("Subnet hasn't been created.")
-                GCPActions().remove_vpc(ssn_conf['vpc_name'])
+            if not ssn_conf['pre_defined_vpc']:
+                GCPActions.remove_vpc(ssn_conf['vpc_name'])
             sys.exit(1)
 
 
@@ -138,10 +147,10 @@
         if os.environ['gcp_firewall_name'] == '':
             raise KeyError
         else:
+            ssn_conf['pre_defined_firewall'] = True
             ssn_conf['firewall_name'] = os.environ['gcp_firewall_name']
     except KeyError:
         try:
-            pre_defined_firewall = True
             logging.info('[CREATE FIREWALL]')
             print('[CREATE FIREWALL]')
             firewall_rules = dict()
@@ -149,7 +158,7 @@
             firewall_rules['egress'] = []
 
             ingress_rule = dict()
-            ingress_rule['name'] = ssn_conf['firewall_name'] + '-ingress'
+            ingress_rule['name'] = '{}-ingress'.format(ssn_conf['firewall_name'])
             ingress_rule['targetTags'] = [ssn_conf['network_tag']]
             ingress_rule['sourceRanges'] = [ssn_conf['allowed_ip_cidr']]
             rules = [
@@ -164,7 +173,7 @@
             firewall_rules['ingress'].append(ingress_rule)
 
             egress_rule = dict()
-            egress_rule['name'] = ssn_conf['firewall_name'] + '-egress'
+            egress_rule['name'] = '{}-egress'.format(ssn_conf['firewall_name'])
             egress_rule['targetTags'] = [ssn_conf['network_tag']]
             egress_rule['destinationRanges'] = [ssn_conf['allowed_ip_cidr']]
             rules = [
@@ -185,89 +194,39 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create Firewall.", str(err))
-            if pre_defined_vpc:
-                GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-                GCPActions().remove_vpc(ssn_conf['vpc_name'])
+            dlab.fab.append_result("Failed to create Firewall.", str(err))
+            if not ssn_conf['pre_defined_subnet']:
+                GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+            if not ssn_conf['pre_defined_vpc']:
+                GCPActions.remove_vpc(ssn_conf['vpc_name'])
             sys.exit(1)
 
     try:
         logging.info('[CREATE SERVICE ACCOUNT AND ROLE]')
         print('[CREATE SERVICE ACCOUNT AND ROLE]')
-        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {}".format(
-            ssn_conf['service_account_name'], ssn_conf['role_name'],
-            ssn_conf['ssn_policy_path'], ssn_conf['ssn_roles_path'])
+        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} " \
+                 "--service_base_name {}".format( ssn_conf['service_account_name'], ssn_conf['role_name'],
+                                                  ssn_conf['ssn_policy_path'], ssn_conf['ssn_roles_path'],
+                                                  ssn_conf['ssn_unique_index'], ssn_conf['service_base_name'])
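+        # The unique index and base name are forwarded so common_create_service_account can build
+        # collision-free service account and role IDs.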
         try:
             local("~/scripts/{}.py {}".format('common_create_service_account', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create Service account and role.", str(err))
+        dlab.fab.append_result("Unable to create Service account and role.", str(err))
         try:
-            GCPActions().remove_service_account(ssn_conf['service_account_name'])
-            GCPActions().remove_role(ssn_conf['role_name'])
+            GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+            GCPActions.remove_role(ssn_conf['role_name'])
         except:
             print("Service account hasn't been created")
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
-        sys.exit(1)
-
-    try:
-        logging.info('[CREATE BUCKETS]')
-        print('[CREATE BUCKETS]')
-        params = "--bucket_name {}".format(ssn_conf['ssn_bucket_name'])
-        try:
-            local("~/scripts/{}.py {}".format('common_create_bucket', params))
-        except:
-            traceback.print_exc()
-            raise Exception
-
-        params = "--bucket_name {}".format(ssn_conf['shared_bucket_name'])
-        try:
-            local("~/scripts/{}.py {}".format('common_create_bucket', params))
-        except:
-            traceback.print_exc()
-            raise Exception
-    except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create bucket.", str(err))
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
-        sys.exit(1)
-
-    try:
-        logging.info('[SET PERMISSIONS FOR SSN BUCKET]')
-        print('[SET PERMISSIONS FOR SSN BUCKET]')
-        GCPActions().set_bucket_owner(ssn_conf['ssn_bucket_name'], ssn_conf['service_account_name'])
-    except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to set bucket permissions.", str(err))
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
         sys.exit(1)
 
     try:
@@ -280,62 +239,62 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create static ip.", str(err))
+        dlab.fab.append_result("Failed to create static ip.", str(err))
         try:
-            GCPActions().remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
+            GCPActions.remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
         except:
             print("Static IP address hasn't been created.")
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+        GCPActions.remove_role(ssn_conf['role_name'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
         sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
+        ssn_conf['initial_user'] = 'ubuntu'
+        ssn_conf['sudo_group'] = 'sudo'
     if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
+        ssn_conf['initial_user'] = 'ec2-user'
+        ssn_conf['sudo_group'] = 'wheel'
 
     try:
-        ssn_conf['static_ip'] = \
-            GCPMeta().get_static_address(ssn_conf['region'], ssn_conf['static_address_name'])['address']
+        ssn_conf['static_ip'] = GCPMeta.get_static_address(ssn_conf['region'],
+                                                           ssn_conf['static_address_name'])['address']
         logging.info('[CREATE SSN INSTANCE]')
         print('[CREATE SSN INSTANCE]')
         params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5}"\
                  " --ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9}"\
-                 " --instance_class {10} --static_ip {11} --network_tag {12} --labels '{13}' --primary_disk_size {14}".\
+                 " --instance_class {10} --static_ip {11} --network_tag {12} --labels '{13}' " \
+                 "--primary_disk_size {14} --service_base_name {15}".\
             format(ssn_conf['instance_name'], ssn_conf['region'], ssn_conf['zone'], ssn_conf['vpc_name'],
-                   ssn_conf['subnet_name'], ssn_conf['instance_size'], ssn_conf['ssh_key_path'], initial_user,
-                   ssn_conf['service_account_name'], ssn_conf['image_name'], 'ssn', ssn_conf['static_ip'],
-                   ssn_conf['network_tag'], json.dumps(ssn_conf['instance_labels']), '20')
+                   ssn_conf['subnet_name'], ssn_conf['instance_size'], ssn_conf['ssh_key_path'],
+                   ssn_conf['initial_user'], ssn_conf['service_account_name'], ssn_conf['image_name'], 'ssn',
+                   ssn_conf['static_ip'], ssn_conf['network_tag'], json.dumps(ssn_conf['instance_labels']), '20',
+                   ssn_conf['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create ssn instance.", str(err))
-        GCPActions().remove_service_account(ssn_conf['service_account_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
-        GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
-        GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
-        if pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Unable to create ssn instance.", str(err))
+        GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+        GCPActions.remove_role(ssn_conf['role_name'])
+        GCPActions.remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
index 0d9c4d4..3e20a15 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
@@ -21,11 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import sys
+import os
+import logging
+import json
+import traceback
 from fabric.api import *
-from dlab.ssn_lib import *
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -36,10 +41,11 @@
     # generating variables dictionary
     print('Generating infrastructure names and tags')
     ssn_conf = dict()
-    ssn_conf['service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
+    ssn_conf['service_base_name'] = dlab.fab.replace_multi_symbols(
+        os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
     ssn_conf['region'] = os.environ['gcp_region']
     ssn_conf['zone'] = os.environ['gcp_zone']
+    pre_defined_vpc = False
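+    # Termination only removes DLab-created resources; a pre-defined VPC is left untouched.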
     try:
         if os.environ['gcp_vpc_name'] == '':
             raise KeyError
@@ -47,21 +53,20 @@
             pre_defined_vpc = True
             ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
     except KeyError:
-        ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
+        ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
 
     try:
         logging.info('[TERMINATE SSN]')
         print('[TERMINATE SSN]')
-        params = "--service_base_name {} --region {} --zone {} --pre_defined_vpc {} --vpc_name {}".format(ssn_conf['service_base_name'],
-                                                                       ssn_conf['region'], ssn_conf['zone'], pre_defined_vpc, ssn_conf['vpc_name'])
+        params = "--service_base_name {} --region {} --zone {} --pre_defined_vpc {} --vpc_name {}".format(
+            ssn_conf['service_base_name'], ssn_conf['region'], ssn_conf['zone'], pre_defined_vpc, ssn_conf['vpc_name'])
         try:
             local("~/scripts/{}.py {}".format('ssn_terminate_gcp_resources', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to terminate ssn.", str(err))
+        dlab.fab.append_result("Failed to terminate ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -70,6 +75,6 @@
                    "Action": "Terminate ssn with all service_base_name environment"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate_gcp_resources.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate_gcp_resources.py
index 194049b..32bde01 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate_gcp_resources.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate_gcp_resources.py
@@ -103,7 +103,7 @@
         list_service_accounts = GCPMeta().get_list_service_accounts()
         for service_account in list_service_accounts:
             if service_account.startswith(args.service_base_name):
-                GCPActions().remove_service_account(service_account)
+                GCPActions().remove_service_account(service_account, args.service_base_name)
         list_roles_names = GCPMeta().get_list_roles()
         for role in list_roles_names:
             if role.startswith(args.service_base_name):
@@ -143,4 +143,4 @@
             print("No such VPC")
             sys.exit(1)
     else:
-        print('VPC is predefined, VPC will not be deleted')
\ No newline at end of file
+        print('VPC is predefined, VPC will not be deleted')
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
new file mode 100644
index 0000000..e43517e
--- /dev/null
+++ b/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+import json
+import sys
+import requests
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import uuid
+from fabric.api import *
+
+
+if __name__ == "__main__":
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
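+        # Image naming: project-scoped when image sharing is off, endpoint-scoped when it is on.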
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        if os.environ['conf_os_family'] == 'debian':
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
+
+        logging.info('[CREATING DLAB SSH USER]')
+        print('[CREATING DLAB SSH USER]')
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
+
+        try:
+            local("~/scripts/{}.py {}".format('create_ssh_user', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    # configuring proxy on Notebook instance
+    try:
+        logging.info('[CONFIGURE PROXY ON SUPERSET INSTANCE]')
+        print('[CONFIGURE PROXY ON SUPERSET INSTANCE]')
+        additional_config = {"proxy_host": edge_instance_private_ip, "proxy_port": "3128"}
+        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
+            .format(instance_hostname, notebook_config['instance_name'], notebook_config['ssh_key_path'],
+                    json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_proxy', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        print('[CONFIGURE KEYCLOAK]')
+        logging.info('[CONFIGURE KEYCLOAK]')
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            os.environ['keycloak_auth_server_url'])
+        keycloak_client_create_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+                                                                           os.environ['keycloak_realm_name'])
+        keycloak_auth_data = {
+            "username": os.environ['keycloak_user'],
+            "password": os.environ['keycloak_user_password'],
+            "grant_type": "password",
+            "client_id": "admin-cli",
+        }
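+        # The request below uses the OAuth2 password grant against the master
+        # realm with the built-in 'admin-cli' client; the resulting access
+        # token authorizes the Admin REST calls that look up or create the
+        # Superset client further down.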
+        try:
+            keycloak_client_id = "{}-{}-superset".format(notebook_config['service_base_name'],
+                                                         notebook_config['project_name'])
+            client_params = {
+                "clientId": keycloak_client_id,
+            }
+            keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
+            keycloak_get_id_client = requests.get(
+                keycloak_client_create_url, data=keycloak_auth_data, params=client_params,
+                headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+                         "Content-Type": "application/json"})
+            json_keycloak_client_id = json.loads(keycloak_get_id_client.text)
+            # Check, if response is not empty
+            if len(json_keycloak_client_id) != 0:
+                print('Keycloak client {} exists. Getting its required attributes.'.format(keycloak_client_id))
+                keycloak_id_client = json_keycloak_client_id[0]['id']
+                keycloak_client_get_secret_url = ("{0}/{1}/client-secret".format(keycloak_client_create_url,
+                                                                                 keycloak_id_client))
+                keycloak_client_get_secret = requests.get(
+                    keycloak_client_get_secret_url, data=keycloak_auth_data,
+                    headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")), "Content-Type":
+                        "application/json"})
+                json_keycloak_client_secret = json.loads(keycloak_client_get_secret.text)
+                keycloak_client_secret = json_keycloak_client_secret['value']
+            else:
+                print('Keycloak client does not exist. Creating new client {0}.'.format(keycloak_client_id))
+                keycloak_client_secret = str(uuid.uuid4())
+                keycloak_client_data = {
+                    "clientId": keycloak_client_id,
+                    "enabled": "true",
+                    "redirectUris": ["*"],
+                    "secret": keycloak_client_secret,
+                }
+                keycloak_client = requests.post(
+                    keycloak_client_create_url, json=keycloak_client_data,
+                    headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+                             "Content-Type": "application/json"})
+        except Exception:
+            dlab.fab.append_result("Failed to configure keycloak.")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure keycloak.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    # updating repositories & installing and configuring superset
+    try:
+        logging.info('[CONFIGURE SUPERSET NOTEBOOK INSTANCE]')
+        print('[CONFIGURE SUPERSET NOTEBOOK INSTANCE]')
+        params = "--hostname {} --keyfile {} " \
+                 "--region {} --os_user {} " \
+                 "--dlab_path {} --keycloak_auth_server_url {} " \
+                 "--keycloak_realm_name {} --keycloak_client_id {} " \
+                 "--keycloak_client_secret {} --edge_instance_private_ip {} " \
+                 "--edge_instance_public_ip {} --superset_name {} ".\
+            format(instance_hostname, notebook_config['ssh_key_path'],
+                   os.environ['gcp_region'], notebook_config['dlab_ssh_user'],
+                   os.environ['ssn_dlab_path'], os.environ['keycloak_auth_server_url'],
+                   os.environ['keycloak_realm_name'], keycloak_client_id,
+                   keycloak_client_secret, edge_instance_private_ip,
+                   edge_instance_hostname, notebook_config['exploratory_name'])
+        try:
+            local("~/scripts/{}.py {}".format('configure_superset_node', params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure superset.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        print("[INSTALLING USER'S KEY]")
+        logging.info("[INSTALLING USER'S KEY]")
+        additional_config = {"user_keyname": os.environ['project_name'],
+                             "user_keydir": os.environ['conf_key_dir']}
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/{}.py {}".format('install_user_key', params))
+        except:
+            dlab.fab.append_result("Failed installing users key")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        print('[SETUP USER GIT CREDENTIALS]')
+        logging.info('[SETUP USER GIT CREDENTIALS]')
+        params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
+            .format(notebook_config['dlab_ssh_user'], instance_hostname, notebook_config['ssh_key_path'])
+        try:
+            local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
+            local("~/scripts/{}.py {}".format('manage_git_creds', params))
+        except:
+            dlab.fab.append_result("Failed setup git credentials")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    if notebook_config['image_enabled'] == 'true':
+        try:
+            print('[CREATING IMAGE]')
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
+            if primary_image_id == '':
+                print("Looks like it's first time we configure notebook server. Creating images.")
+                image_id_list = GCPActions.create_image_from_instance_disks(
+                    notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
+                    notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
+                if image_id_list and image_id_list[0] != '':
+                    print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
+                else:
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
+                if image_id_list and image_id_list[1] != '':
+                    print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
+        except Exception as err:
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
+            sys.exit(1)
+
+    try:
+        print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
+        additional_info = {
+            'instance_hostname': instance_hostname,
+            'tensor': False
+        }
+        params = "--edge_hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} " \
+                 "--type {} " \
+                 "--exploratory_name {} " \
+                 "--additional_info '{}'"\
+            .format(edge_instance_hostname,
+                    notebook_config['ssh_key_path'],
+                    notebook_config['dlab_ssh_user'],
+                    'superset',
+                    notebook_config['exploratory_name'],
+                    json.dumps(additional_info))
+        try:
+            local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
+        except:
+            dlab.fab.append_result("Failed edge reverse proxy template")
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        print('[CONFIGURING PROXY FOR DOCKER]')
+        logging.info('[CONFIGURING PROXY FOR DOCKER]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   notebook_config['ssh_key_path'],
+                   notebook_config['dlab_ssh_user'])
+        try:
+            local("~/scripts/configure_proxy_for_docker.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        print('[STARTING SUPERSET]')
+        logging.info('[STARTING SUPERSET]')
+        params = "--hostname {} " \
+                 "--keyfile {} " \
+                 "--os_user {} ". \
+            format(instance_hostname,
+                   notebook_config['ssh_key_path'],
+                   notebook_config['dlab_ssh_user'])
+        try:
+           local("~/scripts/superset_start.py {}".format(params))
+        except:
+            traceback.print_exc()
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to start Superset.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        superset_ip_url = "http://" + ip_address + ":8088/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        superset_notebook_access_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
+        superset_ungit_access_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("SUPERSET URL: {}".format(superset_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(superset_notebook_acces_url))
+        print("ReverseProxyUngit".format(superset_ungit_acces_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
+                                                                                           notebook_config['dlab_ssh_user'],
+                                                                                           ip_address))
+
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Superset",
+                        "url": superset_notebook_acces_url},
+                       {"description": "Ungit",
+                        "url": superset_ungit_acces_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
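For reference, the Keycloak provisioning performed above reduces to three Admin REST calls: obtain an admin token, look the client up by clientId, then either read its existing secret or create the client. The following is a minimal, standalone sketch of that flow; the server URL, realm, admin credentials, and client name are invented placeholders, not values taken from this repository.

    #!/usr/bin/python
    # Minimal sketch of the Keycloak client lookup/create flow used above.
    # base_url, realm, the admin credentials and 'my-superset' are all
    # placeholders for illustration only.
    import uuid
    import requests

    base_url = 'https://keycloak.example.com/auth'  # placeholder server URL
    realm = 'dlab'                                  # placeholder realm name

    # 1. Obtain an admin access token via the password grant (master realm,
    #    built-in 'admin-cli' client), as the script above does.
    token = requests.post(
        '{}/realms/master/protocol/openid-connect/token'.format(base_url),
        data={'username': 'admin', 'password': 'secret',
              'grant_type': 'password', 'client_id': 'admin-cli'}).json()
    headers = {'Authorization': 'Bearer {}'.format(token['access_token']),
               'Content-Type': 'application/json'}

    # 2. Look the client up by clientId; the endpoint returns a JSON list,
    #    which is empty when no such client exists.
    clients_url = '{}/admin/realms/{}/clients'.format(base_url, realm)
    found = requests.get(clients_url, params={'clientId': 'my-superset'},
                         headers=headers).json()

    if found:
        # 3a. Client exists: read its secret from the client-secret endpoint.
        secret = requests.get('{}/{}/client-secret'.format(clients_url,
                                                           found[0]['id']),
                              headers=headers).json()['value']
    else:
        # 3b. Client is missing: create it with a locally generated secret.
        secret = str(uuid.uuid4())
        requests.post(clients_url, headers=headers,
                      json={'clientId': 'my-superset', 'enabled': 'true',
                            'redirectUris': ['*'], 'secret': secret})
    print(secret)
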
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
index ac4cdde..cef61d1 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
@@ -24,12 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -43,68 +44,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['rstudio_pass'] = id_generator()
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -131,9 +140,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -149,9 +157,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and RSTUDIO and all dependencies
@@ -172,9 +179,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure tensoflow-rstudio.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure tensoflow-rstudio.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -183,16 +189,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -204,21 +210,20 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
@@ -229,11 +234,10 @@
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -258,62 +262,67 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    rstudio_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_acces_url = "http://" + edge_instance_hostname + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+            notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Rstudio",
-                    "url": rstudio_notebook_acces_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_acces_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_acces_url}#,
-                   #{"description": "Rstudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Rstudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "Rstudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
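The configure scripts in this change all derive resource names from the same pieces: service base name, project, endpoint, exploratory name, and application. A compact sketch of that convention, using invented example values:

    #!/usr/bin/python
    # Naming convention shared by the GCP *_configure.py scripts in this
    # change; all argument values in the call below are invented examples.
    def notebook_names(sbn, project, endpoint, exploratory, application,
                       shared_image_enabled):
        # Notebook instance: <sbn>-<project>-<endpoint>-nb-<exploratory>
        instance = '{0}-{1}-{2}-nb-{3}'.format(sbn, project, endpoint,
                                               exploratory)
        if shared_image_enabled == 'false':
            # Project-scoped image names include the project segment.
            primary = '{}-{}-{}-{}-primary-image'.format(sbn, project,
                                                         endpoint, application)
        else:
            # Shared images are scoped to the endpoint only.
            primary = '{}-{}-{}-primary-image'.format(sbn, endpoint,
                                                      application)
        # The secondary image follows the same pattern with
        # '-secondary-image' in place of '-primary-image'.
        return instance, primary

    print(notebook_names('dlab', 'prj', 'ep1', 'nb1', 'superset', 'false'))
    # ('dlab-prj-ep1-nb-nb1', 'dlab-prj-ep1-superset-primary-image')
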
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
index 9a984ee..613b4a1 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
@@ -24,11 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,77 +40,84 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
-
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -126,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -144,9 +151,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and all dependencies
@@ -163,9 +169,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure TensorFlow.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -179,12 +184,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -196,21 +200,20 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
@@ -221,11 +224,10 @@
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -250,60 +252,63 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_acces_url = "http://" + edge_instance_hostname + "/{}-tensor/".format(
-        notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
index 4627f6b..d6c4fab 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
@@ -24,11 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,76 +40,84 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
-
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -125,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -143,9 +151,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring zeppelin and all dependencies
@@ -179,9 +186,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure zeppelin.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -195,12 +201,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -212,36 +217,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -266,51 +269,53 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        zeppelin_ip_url = "http://" + ip_address + ":8080/"
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        zeppelin_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("Zeppelin URL: {}".format(zeppelin_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    zeppelin_ip_url = "http://" + ip_address + ":8080/"
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    zeppelin_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
-        notebook_config['exploratory_name'])
-    zeppelin_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("Zeppelin URL: {}".format(zeppelin_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Apache Zeppelin",
-                    "url": zeppelin_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": zeppelin_ungit_acces_url}#,
-                   #{"description": "Apache Zeppelin (via tunnel)",
-                   # "url": zeppelin_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Apache Zeppelin",
+                        "url": zeppelin_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": zeppelin_ungit_access_url}#,
+                       #{"description": "Apache Zeppelin (via tunnel)",
+                       # "url": zeppelin_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py b/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
index 622297a..ef8f4f8 100644
--- a/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
+++ b/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
@@ -51,6 +51,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_jupyter():
     try:
         sudo('systemctl stop jupyter-notebook')
@@ -67,6 +68,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_zeppelin():
     try:
         sudo('systemctl stop zeppelin-notebook')
@@ -81,6 +83,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_rstudio():
     try:
         remove_os_pkg(['rstudio-server'])
@@ -90,6 +93,7 @@
         print('Error:', str(err))
         sys.exit(1)
 
+
 def clean_tensor():
     try:
         clean_jupyter()
@@ -100,6 +104,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_tensor_rstudio():
     try:
         clean_rstudio()
@@ -110,6 +115,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 if __name__ == "__main__":
     print('Configure connections')
     env['connection_attempts'] = 100
@@ -117,19 +123,19 @@
     env.host_string = args.os_user + '@' + args.hostname
 
     if os.environ['conf_cloud_provider'] == 'azure':
-         de_master_name = '{}-{}-de-{}-{}-m'.format(
-            os.environ['conf_service_base_name'],
-            os.environ['project_name'].replace("_", "-"),
-            os.environ['exploratory_name'].replace("_", "-"),
-            os.environ['computational_name'].replace("_", "-"))
-         de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
-            de_master_name)
-         default_ami_id = 'default'
-    else:
-        de_master_name = '{}-{}-de-{}-{}-m'.format(
+        de_master_name = '{}-{}-{}-de-{}-m'.format(
             os.environ['conf_service_base_name'],
             os.environ['project_name'],
-            os.environ['exploratory_name'],
+            os.environ['endpoint_name'],
+            os.environ['computational_name'])
+        de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
+                                                   de_master_name)
+        default_ami_id = 'default'
+    else:
+        de_master_name = '{}-{}-{}-de-{}-m'.format(
+            os.environ['conf_service_base_name'],
+            os.environ['project_name'],
+            os.environ['endpoint_name'],
             os.environ['computational_name'])
         de_ami_id = get_ami_id_by_instance_name(de_master_name)
         default_ami_id = get_ami_id(
diff --git a/infrastructure-provisioning/src/general/scripts/os/configure_proxy_for_docker.py b/infrastructure-provisioning/src/general/scripts/os/configure_proxy_for_docker.py
new file mode 100644
index 0000000..dc1094a
--- /dev/null
+++ b/infrastructure-provisioning/src/general/scripts/os/configure_proxy_for_docker.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+from fabric.api import *
+import sys
+import argparse
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--hostname', type=str, default='')
+parser.add_argument('--keyfile', type=str, default='')
+parser.add_argument('--os_user', type=str, default='')
+args = parser.parse_args()
+
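+# systemd drop-in files from which the Docker daemon reads its proxy environment variables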
+http_file = '/etc/systemd/system/docker.service.d/http-proxy.conf'
+https_file = '/etc/systemd/system/docker.service.d/https-proxy.conf'
+
+if __name__ == "__main__":
+    print("Configure connections")
+    env['connection_attempts'] = 100
+    env.key_filename = [args.keyfile]
+    env.host_string = args.os_user + '@' + args.hostname
+    print("Configuring proxy for docker")
+    try:
+        sudo('mkdir -p /etc/systemd/system/docker.service.d')
+        sudo('touch {}'.format(http_file))
+        sudo('echo -e \'[Service] \nEnvironment=\"HTTP_PROXY=\'$http_proxy\'\"\' > {}'.format(http_file))
+        sudo('touch {}'.format(https_file))
+        sudo('echo -e \'[Service] \nEnvironment=\"HTTPS_PROXY=\'$http_proxy\'\"\' > {}'.format(https_file))
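+        # also write per-user Docker client proxy settings (~/.docker/config.json); both values come from $http_proxy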
+        sudo('mkdir -p /home/{}/.docker'.format(args.os_user))
+        sudo('touch /home/{}/.docker/config.json'.format(args.os_user))
+        sudo(
+            'echo -e \'{\n "proxies":\n {\n   "default":\n   {\n     "httpProxy":"\'$http_proxy\'",\n     "httpsProxy":"\'$http_proxy\'"\n   }\n }\n}\' > /home/' + args.os_user + '/.docker/config.json')
+        sudo('usermod -a -G docker ' + args.os_user)
+        sudo('update-rc.d docker defaults')
+        sudo('update-rc.d docker enable')
+        sudo('systemctl restart docker')
+    except Exception as err:
+        print('Error: {0}'.format(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
index 089e316..f8729f1 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
@@ -63,7 +63,7 @@
         try:
             data_engine['os_user'] = os.environ['conf_os_user']
             data_engine['service_base_name'] = os.environ['conf_service_base_name']
-            data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+            data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
             data_engine['cluster_name'] = os.environ['computational_id']
             data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
             data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
index 0cc5f6e..9f2b18b 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
@@ -46,7 +46,7 @@
         try:
             data_engine['os_user'] = os.environ['conf_os_user']
             data_engine['service_base_name'] = os.environ['conf_service_base_name']
-            data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+            data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
             data_engine['cluster_name'] = os.environ['computational_id']
             data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
             data_engine['master_ip'] = get_instance_private_ip_address(
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
index 006a313..425b12b 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
@@ -63,7 +63,7 @@
         try:
             data_engine['os_user'] = os.environ['conf_os_user']
             data_engine['service_base_name'] = os.environ['conf_service_base_name']
-            data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+            data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
             data_engine['cluster_name'] = os.environ['computational_id']
             data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
             data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
diff --git a/infrastructure-provisioning/src/general/scripts/os/get_list_available_pkgs.py b/infrastructure-provisioning/src/general/scripts/os/get_list_available_pkgs.py
index c4107a7..48d22a1 100644
--- a/infrastructure-provisioning/src/general/scripts/os/get_list_available_pkgs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/get_list_available_pkgs.py
@@ -82,12 +82,12 @@
     all_pkgs['os_pkg'] = get_available_os_pkgs()
     all_pkgs['java'] = {}
 
-    if os.environ['application'] in ('jupyter', 'zeppelin', 'deeplearning', 'tensor', 'tensor-rstudio', 'rstudio'):
+    if os.environ['application'] in ('jupyter', 'jupyterlab', 'zeppelin', 'deeplearning', 'tensor', 'tensor-rstudio', 'rstudio'):
         all_pkgs['pip2'] = get_available_pip_pkgs("2.7")
         all_pkgs['pip3'] = get_available_pip_pkgs("3.5")
         all_pkgs['others'] = get_uncategorised_pip_pkgs(all_pkgs['pip2'], all_pkgs['pip3'])
 
-    if (os.environ['application'] in ('jupyter', 'zeppelin')
+    if (os.environ['application'] in ('jupyter', 'jupyterlab', 'zeppelin')
         and os.environ['notebook_r_enabled'] == 'true')\
             or os.environ['application'] in ('rstudio', 'tensor-rstudio'):
         all_pkgs['r_pkg'] = get_available_r_pkgs()
diff --git a/infrastructure-provisioning/src/general/scripts/os/jupyter_dataengine_create_configs.py b/infrastructure-provisioning/src/general/scripts/os/jupyter_dataengine_create_configs.py
index 5b4a51c..60a3246 100644
--- a/infrastructure-provisioning/src/general/scripts/os/jupyter_dataengine_create_configs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/jupyter_dataengine_create_configs.py
@@ -79,7 +79,7 @@
 
 def toree_kernel(args):
     spark_path = '/opt/' + args.cluster_name + '/spark/'
-    scala_version = local('scala -e "println(scala.util.Properties.versionNumberString)"', capture=True)
+    scala_version = local('spark-submit --version 2>&1 | grep -o -P "Scala version \K.{0,7}"', capture=True)
     local('mkdir -p ' + kernels_dir + 'toree_' + args.cluster_name + '/')
     local('tar zxvf /tmp/{}/toree_kernel.tar.gz -C '.format(args.cluster_name) + kernels_dir + 'toree_' + args.cluster_name + '/')
     local('sudo mv {0}toree_{1}/toree-0.2.0-incubating/* {0}toree_{1}/'.format(kernels_dir, args.cluster_name))
diff --git a/infrastructure-provisioning/src/general/scripts/os/jupyterlab_container_start.py b/infrastructure-provisioning/src/general/scripts/os/jupyterlab_container_start.py
new file mode 100644
index 0000000..c4ff97b
--- /dev/null
+++ b/infrastructure-provisioning/src/general/scripts/os/jupyterlab_container_start.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import sys
+import os
+import argparse
+from dlab.notebook_lib import *
+from dlab.fab import *
+from fabric.api import *
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--hostname', type=str, default='')
+parser.add_argument('--keyfile', type=str, default='')
+parser.add_argument('--os_user', type=str, default='')
+args = parser.parse_args()
+
+jupyterlab_dir = '/home/' + args.os_user + '/.jupyterlab/'
+
+def start_jupyterlab_container(jupyterlab_dir):
+    try:
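+        # rebuild the JupyterLab image, stop a previously running container (if any), then start a fresh one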
+        with cd('{}'.format(jupyterlab_dir)):
+            run('docker build --file Dockerfile_jupyterlab -t jupyter-lab .')
+            container_id = run('docker ps | awk \'NR==2{print $1}\'')
+            if container_id != '':
+                run('docker stop ' + container_id)
+            run('docker run -d --restart unless-stopped -p 8888:8888 \
+                     -v /home/{0}:/opt/legion/repository \
+                     -v /home/{0}/.ssh/:/home/{0}/.ssh/ \
+                     jupyter-lab:latest'.format(args.os_user))
+    except:
+        sys.exit(1)
+
+if __name__ == "__main__":
+    print("Configure connections")
+    env['connection_attempts'] = 100
+    env.key_filename = [args.keyfile]
+    env.host_string = args.os_user + '@' + args.hostname
+    print("Starting Jupyter container")
+    try:
+        start_jupyterlab_container(jupyterlab_dir)
+    except Exception as err:
+        print('Error: {0}'.format(err))
+        sys.exit(1)
+
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py b/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
index 23f889f..eaf9ea5 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
@@ -45,8 +45,8 @@
         notebook_config = dict()
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
         notebook_config['os_user'] = os.environ['conf_os_user']
-        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
         notebook_config['notebook_ip'] = get_instance_private_ip_address(
             notebook_config['tag_name'], notebook_config['notebook_name'])
         notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py b/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
index 59074f8..5e51179 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
@@ -44,8 +44,8 @@
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
             notebook_config['resource_type'] = os.environ['conf_resource']
-            notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py b/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
index 17abe27..b56157a 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
@@ -47,8 +47,8 @@
         try:
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
-            notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py b/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
index 839f3f9..820c818 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
@@ -46,8 +46,8 @@
         try:
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
-            notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py b/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
index 849333c..8bc607c 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
@@ -48,7 +48,7 @@
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
             notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/superset_start.py b/infrastructure-provisioning/src/general/scripts/os/superset_start.py
new file mode 100644
index 0000000..33bbc9c
--- /dev/null
+++ b/infrastructure-provisioning/src/general/scripts/os/superset_start.py
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import sys
+import os
+import argparse
+from dlab.notebook_lib import *
+from dlab.fab import *
+from fabric.api import *
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--hostname', type=str, default='')
+parser.add_argument('--keyfile', type=str, default='')
+parser.add_argument('--os_user', type=str, default='')
+args = parser.parse_args()
+
+superset_dir = '/home/' + args.os_user + '/incubator-superset/contrib/docker'
+
+def start_superset(superset_dir):
+    try:
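+        # run one-time Superset initialization, then register and start it as a systemd service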
+        with cd('{}'.format(superset_dir)):
+            sudo('docker-compose run --rm superset ./docker-init.sh')
+        sudo('cp /opt/dlab/templates/superset-notebook.service /tmp/')
+        sudo('sed -i \'s/OS_USER/{}/g\' /tmp/superset-notebook.service'.format(args.os_user))
+        sudo('cp /tmp/superset-notebook.service /etc/systemd/system/')
+        sudo('systemctl daemon-reload')
+        sudo('systemctl enable superset-notebook')
+        sudo('systemctl start superset-notebook')
+    except:
+        sys.exit(1)
+
+if __name__ == "__main__":
+    print("Configure connections")
+    env['connection_attempts'] = 100
+    env.key_filename = [args.keyfile]
+    env.host_string = args.os_user + '@' + args.hostname
+    print("Starting Superset")
+    try:
+        start_superset(superset_dir)
+    except Exception as err:
+        print('Error: {0}'.format(err))
+        sys.exit(1)
+
diff --git a/infrastructure-provisioning/src/general/templates/aws/interpreter_livy.json b/infrastructure-provisioning/src/general/templates/aws/interpreter_livy.json
index 4fa46d6..9ad1354 100644
--- a/infrastructure-provisioning/src/general/templates/aws/interpreter_livy.json
+++ b/infrastructure-provisioning/src/general/templates/aws/interpreter_livy.json
@@ -129,7 +129,7 @@
     {
       "id": "central",
       "type": "default",
-      "url": "http://repo1.maven.org/maven2/",
+      "url": "https://repo1.maven.org/maven2/",
       "releasePolicy": {
         "enabled": true,
         "updatePolicy": "daily",
diff --git a/infrastructure-provisioning/src/general/templates/aws/interpreter_spark.json b/infrastructure-provisioning/src/general/templates/aws/interpreter_spark.json
index 0d354cd..56eb105 100644
--- a/infrastructure-provisioning/src/general/templates/aws/interpreter_spark.json
+++ b/infrastructure-provisioning/src/general/templates/aws/interpreter_spark.json
@@ -306,7 +306,7 @@
     {
       "id": "central",
       "type": "default",
-      "url": "http://repo1.maven.org/maven2/",
+      "url": "https://repo1.maven.org/maven2/",
       "releasePolicy": {
         "enabled": true,
         "updatePolicy": "daily",
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_data_engine/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_data_engine/config.xml
index 6430bd4..dd58f62 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_data_engine/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_data_engine/config.xml
@@ -72,6 +72,7 @@
                   <choices class="java.util.Arrays$ArrayList">
                     <a class="string-array">
                       <string>jupyter</string>
+                      <string>jupyterlab</string>
                       <string>rstudio</string>
                       <string>zeppelin</string>
                       <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_image/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_image/config.xml
index 7633fd5..05a6135 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_image/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_image/config.xml
@@ -51,6 +51,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>deeplearning</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_server/config.xml
index 6e29a79..dac6f79 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/create_notebook_server/config.xml
@@ -42,6 +42,7 @@
               <string>tensor</string>
               <string>tensor-rstudio</string>
               <string>deeplearning</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/manage_git_credentials/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/manage_git_credentials/config.xml
index ed7e0b3..119138c 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/manage_git_credentials/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/manage_git_credentials/config.xml
@@ -46,6 +46,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>deeplearning</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_list_available_libs/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_list_available_libs/config.xml
index 9a5da7d..6c88458 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_list_available_libs/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_list_available_libs/config.xml
@@ -46,6 +46,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>deeplearning</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_reconfigure_spark/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_reconfigure_spark/config.xml
index efe4f35..48f3669 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_reconfigure_spark/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/notebook_reconfigure_spark/config.xml
@@ -46,6 +46,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>deeplearning</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/start_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/start_notebook_server/config.xml
index 4c33872..b2fd398 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/start_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/start_notebook_server/config.xml
@@ -46,6 +46,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>tensor-rstudio</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/stop_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/stop_notebook_server/config.xml
index c1103b5..f0ad86a 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/stop_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/stop_notebook_server/config.xml
@@ -46,6 +46,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>tensor-rstudio</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_image/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_image/config.xml
index d8e5a0a..5f970ed 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_image/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_image/config.xml
@@ -46,6 +46,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>deeplearning</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_server/config.xml
index a594df0..2ca3d7f 100644
--- a/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/aws/jenkins_jobs/terminate_notebook_server/config.xml
@@ -46,6 +46,7 @@
               <string>zeppelin</string>
               <string>tensor</string>
               <string>tensor-rstudio</string>
+              <string>jupyterlab</string>
             </a>
           </choices>
         </hudson.model.ChoiceParameterDefinition>
diff --git a/infrastructure-provisioning/src/general/templates/azure/interpreter_livy.json b/infrastructure-provisioning/src/general/templates/azure/interpreter_livy.json
index 4fa46d6..9ad1354 100644
--- a/infrastructure-provisioning/src/general/templates/azure/interpreter_livy.json
+++ b/infrastructure-provisioning/src/general/templates/azure/interpreter_livy.json
@@ -129,7 +129,7 @@
     {
       "id": "central",
       "type": "default",
-      "url": "http://repo1.maven.org/maven2/",
+      "url": "https://repo1.maven.org/maven2/",
       "releasePolicy": {
         "enabled": true,
         "updatePolicy": "daily",
diff --git a/infrastructure-provisioning/src/general/templates/azure/interpreter_spark.json b/infrastructure-provisioning/src/general/templates/azure/interpreter_spark.json
index 0d354cd..56eb105 100644
--- a/infrastructure-provisioning/src/general/templates/azure/interpreter_spark.json
+++ b/infrastructure-provisioning/src/general/templates/azure/interpreter_spark.json
@@ -306,7 +306,7 @@
     {
       "id": "central",
       "type": "default",
-      "url": "http://repo1.maven.org/maven2/",
+      "url": "https://repo1.maven.org/maven2/",
       "releasePolicy": {
         "enabled": true,
         "updatePolicy": "daily",
diff --git a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_image/config.xml b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_image/config.xml
index 7633fd5..f7a2715 100644
--- a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_image/config.xml
+++ b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_image/config.xml
@@ -47,6 +47,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_server/config.xml
index 3c42f15..1c3967e 100644
--- a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/create_notebook_server/config.xml
@@ -47,6 +47,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/manage_git_credentials/config.xml b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/manage_git_credentials/config.xml
index bebdc9e..4a5717d 100644
--- a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/manage_git_credentials/config.xml
+++ b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/manage_git_credentials/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/start_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/start_notebook_server/config.xml
index 230ff9b..1344e62 100644
--- a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/start_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/start_notebook_server/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/stop_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/stop_notebook_server/config.xml
index 02da1ab..3611b2f 100644
--- a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/stop_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/stop_notebook_server/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_image/config.xml b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_image/config.xml
index d8e5a0a..5fa3342 100644
--- a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_image/config.xml
+++ b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_image/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_server/config.xml
index 23f2c32..57e4c1b 100644
--- a/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/azure/jenkins_jobs/terminate_notebook_server/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/gcp/interpreter_livy.json b/infrastructure-provisioning/src/general/templates/gcp/interpreter_livy.json
index d027e3d..35438c4 100644
--- a/infrastructure-provisioning/src/general/templates/gcp/interpreter_livy.json
+++ b/infrastructure-provisioning/src/general/templates/gcp/interpreter_livy.json
@@ -112,7 +112,7 @@
     {
       "id": "central",
       "type": "default",
-      "url": "http://repo1.maven.org/maven2/",
+      "url": "https://repo1.maven.org/maven2/",
       "releasePolicy": {
         "enabled": true,
         "updatePolicy": "daily",
diff --git a/infrastructure-provisioning/src/general/templates/gcp/interpreter_spark.json b/infrastructure-provisioning/src/general/templates/gcp/interpreter_spark.json
index 0d342c0..17e979d 100644
--- a/infrastructure-provisioning/src/general/templates/gcp/interpreter_spark.json
+++ b/infrastructure-provisioning/src/general/templates/gcp/interpreter_spark.json
@@ -299,7 +299,7 @@
     {
       "id": "central",
       "type": "default",
-      "url": "http://repo1.maven.org/maven2/",
+      "url": "https://repo1.maven.org/maven2/",
       "releasePolicy": {
         "enabled": true,
         "updatePolicy": "daily",
diff --git a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/create_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/create_notebook_server/config.xml
index 903e063..e194549 100644
--- a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/create_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/create_notebook_server/config.xml
@@ -37,6 +37,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/manage_git_credentials/config.xml b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/manage_git_credentials/config.xml
index ed7e0b3..77696c4 100644
--- a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/manage_git_credentials/config.xml
+++ b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/manage_git_credentials/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/start_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/start_notebook_server/config.xml
index cff88ae..60ee866 100644
--- a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/start_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/start_notebook_server/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/stop_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/stop_notebook_server/config.xml
index 736431f..471cf0f 100644
--- a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/stop_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/stop_notebook_server/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/terminate_notebook_server/config.xml b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/terminate_notebook_server/config.xml
index a466df4..2d23585 100644
--- a/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/terminate_notebook_server/config.xml
+++ b/infrastructure-provisioning/src/general/templates/gcp/jenkins_jobs/terminate_notebook_server/config.xml
@@ -42,6 +42,7 @@
           <choices class="java.util.Arrays$ArrayList">
             <a class="string-array">
               <string>jupyter</string>
+              <string>jupyterlab</string>
               <string>rstudio</string>
               <string>zeppelin</string>
               <string>tensor</string>
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/src/general/templates/os/debian/superset-notebook.service
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/src/general/templates/os/debian/superset-notebook.service
index 16da950..ef3b4bf 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/src/general/templates/os/debian/superset-notebook.service
@@ -19,19 +19,19 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+[Unit]
+Description=Superset service
 
+[Service]
+Type=simple
+User=OS_USER
+Group=OS_USER
+ExecStart=/usr/bin/sudo docker-compose up
+ExecStop=/usr/bin/sudo docker-compose stop
+WorkingDirectory=/home/OS_USER/incubator-superset/contrib/docker
+TimeoutStopSec=120
+Restart=on-failure
+RestartSec=10
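+
+# OS_USER above is a template placeholder, replaced with the notebook OS user
+# when this unit file is rendered during provisioning.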
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/templates/os/manage_step_certs.sh b/infrastructure-provisioning/src/general/templates/os/manage_step_certs.sh
new file mode 100644
index 0000000..bc194a9
--- /dev/null
+++ b/infrastructure-provisioning/src/general/templates/os/manage_step_certs.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+root_crt_path=STEP_ROOT_CERT_PATH
+crt_path=STEP_CERT_PATH
+key_path=STEP_KEY_PATH
+ca_url=STEP_CA_URL
+resource_type=RESOURCE_TYPE
+renew_status=0
+sans='SANS'
+cn=CN
+kid=KID
+provisioner_password_path=STEP_PROVISIONER_PASSWORD_PATH
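+# NB: the UPPER_CASE values above are template placeholders, substituted with
+# real values when this script is rendered during provisioning.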
+
+function log() {
+    dt=$(date '+%d/%m/%Y %H:%M:%S');
+    echo "[${dt} | ${1}]"
+}
+
+function renew_cert() {
+  log "Trying to renew certificate ${crt_path}"
+  if [ $resource_type = 'edge' ]; then
+    step ca renew ${crt_path} ${key_path} --exec 'nginx -s reload' --ca-url ${ca_url} --root ${root_crt_path} --force --expires-in 8h
+  elif [ $resource_type = 'endpoint' ]; then
+    step ca renew ${crt_path} ${key_path} --exec "/usr/local/bin/renew_certificates.sh" --ca-url ${ca_url} --root ${root_crt_path} --force --expires-in 8h
+  elif [ $resource_type = 'ssn' ]; then
+    step ca renew ${crt_path} ${key_path} --exec "/usr/local/bin/renew_certificates.sh" --ca-url ${ca_url} --root ${root_crt_path} --force --expires-in 8h && nginx -s reload
+  else
+    log "Wrong resource type. Aborting..."
+    exit 1
+  fi
+}
+
+function recreate_cert() {
+  log "Trying to recreate certificate ${crt_path}"
+  step ca token ${cn} --kid ${kid} --ca-url "${ca_url}" --root ${root_crt_path} --password-file ${provisioner_password_path} ${sans} --output-file /tmp/step_token --force
+  token=$(cat /tmp/step_token)
+  step ca certificate ${cn} ${crt_path} ${key_path} --token "${token}" --kty=RSA --size 2048 --provisioner ${kid} --force
+  if [ $resource_type = 'edge' ]; then
+    nginx -s reload
+  elif [ $resource_type = 'endpoint' ]; then
+    /usr/local/bin/renew_certificates.sh
+  elif [ $resource_type = 'ssn' ]; then
+    /usr/local/bin/renew_certificates.sh
+    nginx -s reload
+  else
+    log "Wrong resource type. Aborting..."
+    exit 1
+  fi
+}
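+
+# Main flow: try an in-place renewal first; if that fails (e.g. the
+# certificate has already expired), request a brand new certificate from the
+# step CA using a one-time token.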
+renew_cert
+if [ $? -eq 0 ]; then
+  log "Certificate ${crt_path} has been renewed or hasn't been expired"
+else
+  renew_status=1
+fi
+
+if [ $renew_status -ne 0 ]; then
+  recreate_cert
+  if [ $? -eq 0 ]; then
+    log "Certificate ${crt_path} has been recreated"
+  else
+    log "Failed to recreate the certificate ${crt_path}"
+  fi
+fi
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh b/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
new file mode 100644
index 0000000..ff3e46d
--- /dev/null
+++ b/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
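+# CONF_FILE, OS_USER, RESOURCE_TYPE and JAVA_HOME below are template
+# placeholders, replaced with real values when this script is deployed.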
+KEYSTORE_PASS=$(cat /opt/dlab/conf/CONF_FILE.yml  | grep '<#assign KEY_STORE_PASSWORD' | awk -F  '\"' '{print $2}')
+
+# Removing old certificates
+keytool -delete -alias RESOURCE_TYPE -keystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -storepass "${KEYSTORE_PASS}"
+keytool -delete -alias step-ca -keystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -storepass "${KEYSTORE_PASS}"
+keytool -delete -alias step-ca -keystore JAVA_HOME/lib/security/cacerts -storepass changeit
+keytool -delete -alias RESOURCE_TYPE -keystore JAVA_HOME/lib/security/cacerts -storepass changeit
+
+# Importing new certificates to keystore
+openssl pkcs12 -export -in /etc/ssl/certs/dlab.crt -inkey /etc/ssl/certs/dlab.key -name RESOURCE_TYPE -out /home/OS_USER/keys/RESOURCE_TYPE.p12 -password pass:${KEYSTORE_PASS}
+keytool -importkeystore -srckeystore /home/OS_USER/keys/RESOURCE_TYPE.p12 -srcstoretype PKCS12 -alias RESOURCE_TYPE -destkeystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -deststorepass "${KEYSTORE_PASS}" -srcstorepass "${KEYSTORE_PASS}"
+keytool -keystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -alias step-ca -import -file  /etc/ssl/certs/root_ca.crt  -deststorepass "${KEYSTORE_PASS}" -noprompt
+
+
+# Adding new certificates
+keytool -importcert -trustcacerts -alias RESOURCE_TYPE -file /etc/ssl/certs/dlab.crt -noprompt -storepass changeit -keystore JAVA_HOME/lib/security/cacerts
+keytool -importcert -trustcacerts -alias step-ca -file /etc/ssl/certs/root_ca.crt -noprompt -storepass changeit -keystore JAVA_HOME/lib/security/cacerts
+
+# Restarting service
+supervisorctl restart all
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/src/general/templates/os/step-cert-manager.service
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/src/general/templates/os/step-cert-manager.service
index 16da950..994eea7 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/src/general/templates/os/step-cert-manager.service
@@ -19,19 +19,14 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+[Unit]
+Description=Check Step certificates
+After=network.target
 
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/manage_step_certs.sh
+TimeoutStartSec=0
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+[Install]
+WantedBy=default.target
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py b/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
index 064d492..94ad123 100644
--- a/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
+++ b/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
@@ -38,7 +38,7 @@
 parser.add_argument('--os_user', type=str, default='')
 parser.add_argument('--scala_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -107,6 +107,7 @@
     # INSTALL SPARK AND CLOUD STORAGE JARS FOR SPARK
     print("Install local Spark")
     ensure_local_spark(args.os_user, spark_link, spark_version, hadoop_version, local_spark_path)
+    local_spark_scala_version = sudo('spark-submit --version 2>&1 | grep -o -P "Scala version \K.{0,7}"')
     print("Install storage jars")
     ensure_local_jars(args.os_user, jars_dir)
     print("Configure local Spark")
@@ -118,7 +119,7 @@
     print("Install py3spark local kernel for Jupyter")
     ensure_py3spark_local_kernel(args.os_user, py3spark_local_path_dir, templates_dir, spark_version)
     print("Install Toree-Scala kernel for Jupyter")
-    ensure_toree_local_kernel(args.os_user, toree_link, scala_kernel_path, files_dir, args.scala_version, spark_version)
+    ensure_toree_local_kernel(args.os_user, toree_link, scala_kernel_path, files_dir, local_spark_scala_version, spark_version)
     if os.environ['notebook_r_enabled'] == 'true':
         print("Install R kernel for Jupyter")
         ensure_r_local_kernel(spark_version, args.os_user, templates_dir, r_kernels_dir)
@@ -133,7 +134,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/jupyterlab/Dockerfile_jupyterlab b/infrastructure-provisioning/src/jupyterlab/Dockerfile_jupyterlab
new file mode 100644
index 0000000..4ebc51b
--- /dev/null
+++ b/infrastructure-provisioning/src/jupyterlab/Dockerfile_jupyterlab
@@ -0,0 +1,49 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+FROM jupyterlab_image
+
+ARG NB_USER="jovyan"
+
+EXPOSE 8888
+
+USER root
+
+RUN usermod -aG sudo $NB_USER
+
+COPY jupyter_notebook_config.py /etc/jupyter/jupyter_notebook_config.py
+
+# Preparing start script
+COPY jupyterlab_run.sh /jupyterlab_run.sh
+RUN  sed -i 's|CONF_PATH|/etc/jupyter/jupyter_notebook_config.py|' /jupyterlab_run.sh \
+  && chmod +x /jupyterlab_run.sh
+
+RUN apt update && apt install -y vim netcat-openbsd
+
+USER $NB_USER
+
+RUN jupyter serverextension enable --py jupyterlab_git && \
+    jupyter serverextension enable --py odahuflow.jupyterlab && \
+    echo "ENABLED PLUGINS:" && \
+    jupyter serverextension list
+
+
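+# Start through the wrapper script; the -d flag keeps the container alive even
+# if the JupyterLab process exits (see jupyterlab_run.sh).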
+ENTRYPOINT ["/jupyterlab_run.sh", "-d"]
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/jupyterlab/fabfile.py b/infrastructure-provisioning/src/jupyterlab/fabfile.py
new file mode 100644
index 0000000..110981c
--- /dev/null
+++ b/infrastructure-provisioning/src/jupyterlab/fabfile.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+import json
+import sys
+from dlab.fab import *
+from dlab.meta_lib import *
+from dlab.actions_lib import *
+import os
+import uuid
+import traceback
+
+
+# Main function for provisioning notebook server
+def run():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    notebook_config = dict()
+    notebook_config['uuid'] = str(uuid.uuid4())[:5]
+
+    try:
+        params = "--uuid {}".format(notebook_config['uuid'])
+        local("~/scripts/{}.py {}".format('common_prepare_notebook', params))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed preparing Notebook node.", str(err))
+        sys.exit(1)
+
+    try:
+        params = "--uuid {}".format(notebook_config['uuid'])
+        local("~/scripts/{}.py {}".format('jupyterlab_configure', params))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed configuring Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for terminating exploratory environment
+def terminate():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        local("~/scripts/{}.py".format('common_terminate_notebook'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed terminating Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for stopping notebook server
+def stop():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        local("~/scripts/{}.py".format('common_stop_notebook'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed stopping Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for starting notebook server
+def start():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('common_start_notebook'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed starting Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for configuring notebook server after deploying DataEngine service
+def configure():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        if os.environ['conf_resource'] == 'dataengine-service':
+            local("~/scripts/{}.py".format('common_notebook_configure_dataengine-service'))
+        elif os.environ['conf_resource'] == 'dataengine':
+            local("~/scripts/{}.py".format('common_notebook_configure_dataengine'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed configuring analytical tool on Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for installing additional libraries for notebook
+def install_libs():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('notebook_install_libs'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed installing additional libs for Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for getting available libraries for notebook
+def list_libs():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('notebook_list_libs'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed get available libraries for notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for managing git credentials on notebook
+def git_creds():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('notebook_git_creds'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to manage git credentials for notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for creating image from notebook
+def create_image():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('common_create_notebook_image'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to create image from notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for deleting existing notebook image
+def terminate_image():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('common_terminate_notebook_image'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to create image from notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for reconfiguring Spark for notebook
+def reconfigure_spark():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('notebook_reconfigure_spark'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to reconfigure Spark for Notebook node.", str(err))
+        sys.exit(1)
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/src/jupyterlab/scripts/build.sh
similarity index 76%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/src/jupyterlab/scripts/build.sh
index d0cfc24..2d1e6e1 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/src/jupyterlab/scripts/build.sh
@@ -1,3 +1,6 @@
+#!/bin/bash
+
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +19,6 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+docker build -t jupyter-lab -f Dockerfile_jupyterlab .
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py b/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
new file mode 100644
index 0000000..1486ff3
--- /dev/null
+++ b/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import argparse
+import json
+import sys
+from dlab.notebook_lib import *
+from dlab.actions_lib import *
+from dlab.fab import *
+import os
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--hostname', type=str, default='')
+parser.add_argument('--keyfile', type=str, default='')
+parser.add_argument('--edge_ip', type=str, default='')
+parser.add_argument('--region', type=str, default='')
+parser.add_argument('--spark_version', type=str, default='')
+parser.add_argument('--hadoop_version', type=str, default='')
+parser.add_argument('--os_user', type=str, default='')
+parser.add_argument('--scala_version', type=str, default='')
+parser.add_argument('--r_mirror', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
+parser.add_argument('--exploratory_name', type=str, default='')
+args = parser.parse_args()
+
+spark_version = args.spark_version
+hadoop_version = args.hadoop_version
+jupyter_version = os.environ['notebook_jupyter_version']
+scala_link = "http://www.scala-lang.org/files/archive/"
+if args.region == 'cn-north-1':
+    spark_link = "http://mirrors.hust.edu.cn/apache/spark/spark-" + spark_version + "/spark-" + spark_version + \
+                 "-bin-hadoop" + hadoop_version + ".tgz"
+else:
+    spark_link = "https://archive.apache.org/dist/spark/spark-" + spark_version + "/spark-" + spark_version + \
+                 "-bin-hadoop" + hadoop_version + ".tgz"
+
+docker_version = '18.09.4'
+http_file = '/etc/systemd/system/docker.service.d/http-proxy.conf'
+https_file = '/etc/systemd/system/docker.service.d/https-proxy.conf'
+legion_dir = '/home/' + args.os_user + '/legion/legion/'
+jupyterlab_image = os.environ['notebook_jupyterlab_image']
+jupyterlab_dir = '/home/' + args.os_user + '/.jupyterlab/'
+spark_script = jupyterlab_dir + 'spark.sh'
+pyspark_local_path_dir = '/home/' + args.os_user + '/.jupyterlab/kernels/pyspark_local/'
+py3spark_local_path_dir = '/home/' + args.os_user + '/.jupyterlab/kernels/py3spark_local/'
+jupyter_conf_file = jupyterlab_dir + 'jupyter_notebook_config.py'
+jupyterlab_conf_file = '\/etc\/jupyter\/jupyter_notebook_config.py'
+scala_kernel_path = '/usr/local/share/jupyter/kernels/apache_toree_scala/'
+r_kernels_dir = '/home/' + args.os_user + '/.local/share/jupyter/kernels/'
+jars_dir = '/opt/jars/'
+templates_dir = '/root/templates/'
+files_dir = '/root/files/'
+local_spark_path = '/opt/spark/'
+toree_link = 'http://archive.apache.org/dist/incubator/toree/0.2.0-incubating/toree-pip/toree-0.2.0.tar.gz'
+r_libs = ['R6', 'pbdZMQ', 'RCurl', 'devtools', 'reshape2', 'caTools', 'rJava', 'ggplot2']
+gitlab_certfile = os.environ['conf_gitlab_certfile']
+
+
+##############
+# Run script #
+##############
+if __name__ == "__main__":
+    print("Configure connections")
+    env['connection_attempts'] = 100
+    env.key_filename = [args.keyfile]
+    env.host_string = args.os_user + '@' + args.hostname
+
+    # PREPARE DISK
+    print("Prepare .ensure directory")
+    try:
+        if not exists('/home/' + args.os_user + '/.ensure_dir'):
+            sudo('mkdir /home/' + args.os_user + '/.ensure_dir')
+    except:
+        sys.exit(1)
+    print("Mount additional volume")
+    prepare_disk(args.os_user)
+
+    # INSTALL DOCKER
+    print ("Install Docker")
+    configure_docker(args.os_user)
+
+    # CONFIGURE JUPYTER FILES
+    print("Configure jupyter files")
+    ensure_jupyterlab_files(args.os_user, jupyterlab_dir, jupyterlab_image, jupyter_conf_file, jupyterlab_conf_file, args.exploratory_name, args.edge_ip)
+
+    # INSTALL UNGIT
+    print("Install nodejs")
+    install_nodejs(args.os_user)
+    print("Install ungit")
+    install_ungit(args.os_user, args.exploratory_name, args.edge_ip)
+    if exists('/home/{0}/{1}'.format(args.os_user, gitlab_certfile)):
+        install_gitlab_cert(args.os_user, gitlab_certfile)
+
+    # INSTALL INACTIVITY CHECKER
+    print("Install inactivity checker")
+    install_inactivity_checker(args.os_user, args.ip_address)
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/src/jupyterlab/scripts/jupyterlab_run.sh
similarity index 75%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/src/jupyterlab/scripts/jupyterlab_run.sh
index 16da950..55c2245 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/src/jupyterlab/scripts/jupyterlab_run.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -19,19 +21,12 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+jupyter lab --config CONF_PATH
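+# Note: jupyter lab runs in the foreground here, so the checks below only take
+# effect after it exits: -d then blocks to keep the container alive, -bash
+# opens an interactive shell.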
 
+if [[ $1 == '-d' ]]; then
+        while true; do sleep 1000; done
+fi
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+if [[$1 == "-bash"]];then
+        /bin/bash
+fi
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/src/jupyterlab/scripts/start.sh
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/src/jupyterlab/scripts/start.sh
index 16da950..2a6e089 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/src/jupyterlab/scripts/start.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -19,19 +21,7 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+docker run -d --restart unless-stopped -p 8888:8888 \
+    -v jup_volume:/opt/legion/repository \
+    -v /home/nb_user/.ssh/:/home/nb_user/.ssh/ \
+    jupyter-lab:latest
diff --git a/infrastructure-provisioning/src/project/fabfile.py b/infrastructure-provisioning/src/project/fabfile.py
index 5949469..385704e 100644
--- a/infrastructure-provisioning/src/project/fabfile.py
+++ b/infrastructure-provisioning/src/project/fabfile.py
@@ -45,13 +45,6 @@
         append_result("Failed preparing Project.", str(err))
         sys.exit(1)
 
-#    try:
-#        local("~/scripts/{}.py".format('edge_prepare'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed preparing Edge node.", str(err))
-#        sys.exit(1)
-
     try:
         local("~/scripts/{}.py".format('edge_configure'))
     except Exception as err:
diff --git a/infrastructure-provisioning/src/project/scripts/configure_keycloak.py b/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
new file mode 100644
index 0000000..80e7501
--- /dev/null
+++ b/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+import json
+import sys
+import requests
+import argparse
+from dlab.fab import *
+from dlab.meta_lib import *
+from dlab.actions_lib import *
+import os
+import uuid
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--service_base_name', type=str, default='')
+parser.add_argument('--keycloak_auth_server_url', type=str, default='')
+parser.add_argument('--keycloak_realm_name', type=str, default='')
+parser.add_argument('--keycloak_user', type=str, default='')
+parser.add_argument('--keycloak_user_password', type=str, default='')
+parser.add_argument('--keycloak_client_secret', type=str, default='')
+parser.add_argument('--edge_public_ip', type=str, default='')
+parser.add_argument('--hostname', type=str, default='')
+parser.add_argument('--project_name', type=str, default='')
+parser.add_argument('--endpoint_name', type=str, default='')
+args = parser.parse_args()
+
+##############
+# Run script #
+##############
+if __name__ == "__main__":
+    try:
+        print('[CONFIGURE KEYCLOAK]')
+        logging.info('[CONFIGURE KEYCLOAK]')
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            args.keycloak_auth_server_url)
+        keycloak_auth_data = {
+            "username": args.keycloak_user,
+            "password": args.keycloak_user_password,
+            "grant_type": "password",
+            "client_id": "admin-cli",
+        }
+
+        keycloak_client_create_url = '{0}/admin/realms/{1}/clients'.format(args.keycloak_auth_server_url,
+                                                                           args.keycloak_realm_name)
+        keycloak_client_name = "{0}-{1}-{2}".format(args.service_base_name, args.project_name, args.endpoint_name)
+        keycloak_client_id = str(uuid.uuid4())
+        if args.hostname == '':
+            keycloak_redirectUris = 'https://{0}/*,http://{0}/*'.format(args.edge_public_ip).lower().split(',')
+            print(keycloak_redirectUris)
+        else:
+            keycloak_redirectUris = 'https://{0}/*,http://{0}/*,https://{1}/*,http://{1}/*'.format(
+                args.edge_public_ip, args.hostname).lower().split(',')
+        keycloak_client_data = {
+            "clientId": keycloak_client_name,
+            "id": keycloak_client_id,
+            "enabled": "true",
+            "redirectUris": keycloak_redirectUris,
+            "publicClient": "false",
+            "secret": args.keycloak_client_secret,
+            "protocol": "openid-connect",
+        }
+
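+        # Obtain an admin access token via the password grant, then register
+        # the client in the target realm through the Keycloak admin REST API.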
+        try:
+            keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data, verify=False).json()
+
+            keycloak_client = requests.post(keycloak_client_create_url, json=keycloak_client_data,
+                                            headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
+                                                     "Content-Type": "application/json"}, verify=False)
+
+        except Exception as err:
+            append_result("Failed to configure keycloak.")
+            raise Exception
+    except Exception as err:
+        print('Error: {0}'.format(err))
+        append_result("Failed to configure keycloak.", str(err))
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/project/scripts/configure_nginx_reverse_proxy.py b/infrastructure-provisioning/src/project/scripts/configure_nginx_reverse_proxy.py
index 2d564f7..a4f0825 100644
--- a/infrastructure-provisioning/src/project/scripts/configure_nginx_reverse_proxy.py
+++ b/infrastructure-provisioning/src/project/scripts/configure_nginx_reverse_proxy.py
@@ -26,12 +26,16 @@
 import argparse
 import sys
 import os
-from dlab.edge_lib import install_nginx_ldap
+from dlab.common_lib import ensure_step
+from dlab.edge_lib import install_nginx_lua
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--hostname', type=str, default='')
 parser.add_argument('--keyfile', type=str, default='')
 parser.add_argument('--user', type=str, default='')
+parser.add_argument('--keycloak_client_id', type=str, default='')
+parser.add_argument('--keycloak_client_secret', type=str, default='')
+parser.add_argument('--step_cert_sans', type=str, default='')
 args = parser.parse_args()
 
 if __name__ == "__main__":
@@ -51,12 +55,18 @@
     except Exception as err:
         print("Failed establish connection. Excpeption: " + str(err))
         sys.exit(1)
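+    # When step certificates are enabled, install the step CLI first so the
+    # reverse proxy can be served with certificates issued by the step CA.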
+    if os.environ['conf_stepcerts_enabled'] == 'true':
+        try:
+            ensure_step(args.user)
+        except Exception as err:
+            print("Failed install step: " + str(err))
+            sys.exit(1)
 
     try:
-        install_nginx_ldap(args.hostname, os.environ['reverse_proxy_nginx_version'],
-                           os.environ['ldap_hostname'], os.environ['ldap_dn'],
-                           os.environ['ldap_ou'], os.environ['ldap_service_password'],
-                           os.environ['ldap_service_username'])
+        install_nginx_lua(args.hostname, os.environ['reverse_proxy_nginx_version'],
+                          os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'],
+                          args.keycloak_client_id, args.keycloak_client_secret, args.user, args.hostname,
+                          args.step_cert_sans)
     except Exception as err:
         print("Failed install nginx reverse proxy: " + str(err))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/project/templates/conf.d/proxy.conf b/infrastructure-provisioning/src/project/templates/conf.d/proxy.conf
index 3c46029..a58147f 100644
--- a/infrastructure-provisioning/src/project/templates/conf.d/proxy.conf
+++ b/infrastructure-provisioning/src/project/templates/conf.d/proxy.conf
@@ -21,8 +21,46 @@
 server {
     listen 80;
     server_name EDGE_IP;
-	auth_ldap "Forbidden";
-    auth_ldap_servers ldap1;
-
     include locations/*.conf;
+    rewrite ^/(.*)$ https://$server_name/$1 permanent;
+}
+
+server {
+    listen       443 ssl;
+    server_name  EDGE_IP;
+    if ($host != $server_name) {
+        rewrite ^/(.*)$ https://$server_name/$1 redirect;
+    }
+    # SSL section
+    proxy_buffering off;
+    ssl on;
+    ssl_certificate /etc/ssl/certs/dlab.crt;
+    ssl_certificate_key /etc/ssl/certs/dlab.key;
+    ssl_session_timeout 5m;
+    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+    ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';
+    ssl_prefer_server_ciphers on;
+    # ssl_dhparam /etc/ssl/certs/dhparam.pem;
+
+    access_by_lua '
+          local opts = {
+            redirect_uri_path = "/*",
+            accept_none_alg = true,
+            discovery = "KEYCLOAK_AUTH_URL/realms/KEYCLOAK_REALM_NAME/.well-known/openid-configuration",
+            client_id = "KEYCLOAK_CLIENT_ID",
+            client_secret = "KEYCLOAK_CLIENT_SECRET",
+            ssl_verify = "no",
+            redirect_uri_scheme = "https",
+            session_contents = {id_token=true}
+          }
+          local res, err = require("resty.openidc").authenticate(opts)
+
+          if err then
+            ngx.status = 403
+            ngx.say(err)
+            ngx.exit(ngx.HTTP_FORBIDDEN)
+          end
+       ';
+
+    include locations/*.conf;
 }
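
The access_by_lua block delegates authentication to lua-resty-openidc, which fetches the realm's
OpenID Connect discovery document from the substituted URL at request time. A quick sanity check
of that URL, written with requests (already a dependency of the provisioning scripts) and not
part of the patch itself:

    # Fetch the discovery document the lua block relies on; verify=False
    # mirrors the ssl_verify = "no" option above.
    import requests

    def check_discovery(auth_url, realm):
        url = '{0}/realms/{1}/.well-known/openid-configuration'.format(auth_url, realm)
        resp = requests.get(url, verify=False)
        resp.raise_for_status()
        return resp.json()['authorization_endpoint']
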
diff --git a/infrastructure-provisioning/src/project/templates/locations/superset.conf b/infrastructure-provisioning/src/project/templates/locations/superset.conf
new file mode 100644
index 0000000..5fd66b0
--- /dev/null
+++ b/infrastructure-provisioning/src/project/templates/locations/superset.conf
@@ -0,0 +1,62 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+location /{{ NAME }}/ {
+    rewrite ^/{{ NAME }}/(.*)$ /$1 break;
+    proxy_pass http://{{ IP }}:8088/;
+    proxy_redirect http://{{ IP }}:8088/ http://$host/{{ NAME }}/;
+    proxy_redirect http://{{ IP }}:8088/superset/welcome $scheme://$host/{{ NAME }}/superset/welcome;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "upgrade";
+    proxy_set_header Accept-Encoding "";
+    sub_filter_types *;
+    sub_filter_once off;
+    sub_filter_last_modified on;
+    sub_filter '/static/' '/{{ NAME }}/static/';
+    sub_filter '/superset/' '/{{ NAME }}/superset/';
+    sub_filter '/chart/' '/{{ NAME }}/chart/';
+    sub_filter '/dashboard/' '/{{ NAME }}/dashboard/';
+    sub_filter '/sqllab/' '/{{ NAME }}/sqllab/';
+    sub_filter '/tablemodelview/' '/{{ NAME }}/tablemodelview/';
+    sub_filter '/csvtodatabaseview/' '/{{ NAME }}/csvtodatabaseview/';
+    sub_filter '/druid/' '/{{ NAME }}/druid/';
+    sub_filter '/druidclustermodelview/' '/{{ NAME }}/druidclustermodelview/';
+    sub_filter '/druiddatasourcemodelview/' '/{{ NAME }}/druiddatasourcemodelview/';
+    sub_filter '/databaseview/' '/{{ NAME }}/databaseview/';
+    sub_filter '/dashboardasync/' '/{{ NAME }}/dashboardasync/';
+    sub_filter '/users/' '/{{ NAME }}/users/';
+    sub_filter '/userstatschartview/' '/{{ NAME }}/userstatschartview/';
+    sub_filter '/registeruser/' '/{{ NAME }}/registeruser/';
+    sub_filter '/permissions/' '/{{ NAME }}/permissions/';
+    sub_filter '/viewmenus/' '/{{ NAME }}/viewmenus/';
+    sub_filter '/permissionviews/' '/{{ NAME }}/permissionviews/';
+    sub_filter '/roles/' '/{{ NAME }}/roles/';
+    sub_filter '/csstemplatemodelview/' '/{{ NAME }}/csstemplatemodelview/';
+    sub_filter '/queryview/' '/{{ NAME }}/queryview/';
+    sub_filter '/annotationlayermodelview/' '/{{ NAME }}/annotationlayermodelview/';
+    sub_filter '/annotationmodelview/' '/{{ NAME }}/annotationmodelview/';
+    sub_filter '/lang/' '/{{ NAME }}/lang/';
+    sub_filter '/logout/' '/{{ NAME }}/logout/';
+    sub_filter '/logmodelview/' '/{{ NAME }}/logmodelview/';
+    #proxy_set_header SCRIPT_NAME /{{ NAME }};
+}
\ No newline at end of file
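
Every sub_filter line in this template follows the same pattern: an absolute Superset path is
rewritten under the /{{ NAME }}/ prefix. A block like this could be generated from a plain list
of path prefixes, which also makes accidental duplicates easy to avoid. A sketch with a partial
list (the generator is illustrative, not part of the provisioning code):

    # Illustrative generator for the repetitive sub_filter block.
    PREFIXES = ['static', 'superset', 'chart', 'dashboard', 'sqllab',
                'users', 'roles', 'lang', 'logout']

    def sub_filter_block(name):
        return '\n'.join("    sub_filter '/{0}/' '/{1}/{0}/';".format(p, name)
                         for p in PREFIXES)
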
diff --git a/infrastructure-provisioning/src/project/templates/nginx.conf b/infrastructure-provisioning/src/project/templates/nginx.conf
index f3f4958..d012375 100644
--- a/infrastructure-provisioning/src/project/templates/nginx.conf
+++ b/infrastructure-provisioning/src/project/templates/nginx.conf
@@ -19,10 +19,13 @@
 #
 # ******************************************************************************
 user nginx;
-worker_processes auto;
+worker_processes 1;
 error_log /var/log/nginx/error.log;
 pid /run/nginx.pid;
 
+load_module /etc/nginx/modules/ndk_http_module.so;
+load_module /etc/nginx/modules/ngx_http_lua_module.so;
+
 include /usr/share/nginx/modules/*.conf;
 
 events {
@@ -44,21 +47,14 @@
     proxy_read_timeout 86400s;
     proxy_send_timeout 86400s;
     client_max_body_size 50M;
+    resolver 8.8.8.8;
+    resolver_timeout 10s;
 
     include             /etc/nginx/mime.types;
     default_type        application/octet-stream;
 
-    auth_ldap_cache_enabled on;
-    auth_ldap_cache_expiration_time 10000;
-    auth_ldap_cache_size 1000;
-
-    ldap_server ldap1 {
-        url ldap://LDAP_IP:389/LDAP_DN?uid,mail?sub?(&(objectClass=posixAccount));
-        binddn "LDAP_SERVICE_USERNAME,LDAP_DN";
-        binddn_passwd "LDAP_SERVICE_PASSWORD";
-        require valid_user;
-        request_timeout 30s;
-    }
+    lua_shared_dict discovery 1m;
+    lua_shared_dict jwks 1m;
 
     include /etc/nginx/conf.d/*.conf;
 }
diff --git a/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py b/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
index a2878b8..34fb007 100644
--- a/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
+++ b/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
@@ -40,7 +40,7 @@
 parser.add_argument('--rstudio_pass', type=str, default='')
 parser.add_argument('--rstudio_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -113,7 +113,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress, True)
+    install_inactivity_checker(args.os_user, args.ip_address, True)
 
     #POST INSTALLATION PROCESS
     print("Updating pyOpenSSL library")
diff --git a/infrastructure-provisioning/src/ssn/fabfile.py b/infrastructure-provisioning/src/ssn/fabfile.py
index 3f97925..29c3cc5 100644
--- a/infrastructure-provisioning/src/ssn/fabfile.py
+++ b/infrastructure-provisioning/src/ssn/fabfile.py
@@ -28,6 +28,7 @@
 from fabric.api import *
 from dlab.fab import *
 import traceback
+import uuid
 
 
 def run():
@@ -36,15 +37,17 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
+    ssn_config = dict()
+    ssn_config['ssn_unique_index'] = str(uuid.uuid4())[:5]
     try:
-        local("~/scripts/{}.py".format('ssn_prepare'))
+        local("~/scripts/{}.py --ssn_unique_index {}".format('ssn_prepare', ssn_config['ssn_unique_index']))
     except Exception as err:
         traceback.print_exc()
         append_result("Failed preparing SSN node.", str(err))
         sys.exit(1)
 
     try:
-        local("~/scripts/{}.py".format('ssn_configure'))
+        local("~/scripts/{}.py --ssn_unique_index {}".format('ssn_configure', ssn_config['ssn_unique_index']))
     except Exception as err:
         traceback.print_exc()
         append_result("Failed configuring SSN node.", str(err))
diff --git a/infrastructure-provisioning/src/ssn/files/aws/mongo_roles.json b/infrastructure-provisioning/src/ssn/files/aws/mongo_roles.json
index 70eb16e..54d2cd6 100644
--- a/infrastructure-provisioning/src/ssn/files/aws/mongo_roles.json
+++ b/infrastructure-provisioning/src/ssn/files/aws/mongo_roles.json
@@ -110,6 +110,16 @@
     ]
   },
   {
+    "_id": "nbCreateJupyterLab",
+    "description": "Create Notebook JupyterLab",
+    "exploratories": [
+      "docker.dlab-jupyterlab"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
     "exploratories": [
diff --git a/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json b/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
index 58cadb3..305c46b 100644
--- a/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
+++ b/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
@@ -1,7 +1,7 @@
 [
   {
     "_id": "nbShapes_Standard_NC6_fetching",
-    "description": "Allow to use Standard_NC6 instance shape for notebook",
+    "description": "Use Standard_NC6 instance shape for notebook",
     "exploratory_shapes": [
       "Standard_NC6"
     ],
diff --git a/infrastructure-provisioning/src/ssn/files/gcp/mongo_roles.json b/infrastructure-provisioning/src/ssn/files/gcp/mongo_roles.json
index cf7b398..43d12e3 100644
--- a/infrastructure-provisioning/src/ssn/files/gcp/mongo_roles.json
+++ b/infrastructure-provisioning/src/ssn/files/gcp/mongo_roles.json
@@ -90,6 +90,26 @@
     ]
   },
   {
+    "_id": "nbCreateJupyterLab",
+    "description": "Create Notebook JupyterLab",
+    "exploratories": [
+      "docker.dlab-jupyterlab"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateSuperset",
+    "description": "Create Notebook Superset",
+    "exploratories": [
+      "docker.dlab-superset"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
     "exploratories": [
diff --git a/infrastructure-provisioning/src/ssn/scripts/configure_billing.py b/infrastructure-provisioning/src/ssn/scripts/configure_billing.py
index 9f40e72..1df44cc 100644
--- a/infrastructure-provisioning/src/ssn/scripts/configure_billing.py
+++ b/infrastructure-provisioning/src/ssn/scripts/configure_billing.py
@@ -49,15 +49,24 @@
 parser.add_argument('--region_info', type=str, default='', help='Azure region info')
 parser.add_argument('--mongo_password', type=str, help='The password for Mongo DB')
 parser.add_argument('--dlab_dir', type=str, help='The path to dlab dir')
-parser.add_argument('--dlab_id', type=str, default='', help='Column name in report file that contains dlab id tag')
-parser.add_argument('--usage_date', type=str, default='', help='Column name in report file that contains usage date tag')
-parser.add_argument('--product', type=str, default='', help='Column name in report file that contains product name tag')
-parser.add_argument('--usage_type', type=str, default='', help='Column name in report file that contains usage type tag')
-parser.add_argument('--usage', type=str, default='', help='Column name in report file that contains usage tag')
-parser.add_argument('--cost', type=str, default='', help='Column name in report file that contains cost tag')
-parser.add_argument('--resource_id', type=str, default='', help='Column name in report file that contains dlab resource id tag')
-parser.add_argument('--tags', type=str, default='', help='Column name in report file that contains tags')
+parser.add_argument('--dlab_id', type=str, default='resource_tags_user_user_tag', help='Column name in report file that contains dlab id tag')
+parser.add_argument('--usage_date', type=str, default='line_item_usage_start_date', help='Column name in report file that contains usage date tag')
+parser.add_argument('--product', type=str, default='product_product_name', help='Column name in report file that contains product name tag')
+parser.add_argument('--usage_type', type=str, default='line_item_usage_type', help='Column name in report file that contains usage type tag')
+parser.add_argument('--usage', type=str, default='line_item_usage_amount', help='Column name in report file that contains usage tag')
+parser.add_argument('--cost', type=str, default='line_item_blended_cost', help='Column name in report file that contains cost tag')
+parser.add_argument('--resource_id', type=str, default='line_item_resource_id', help='Column name in report file that contains dlab resource id tag')
+parser.add_argument('--tags', type=str, default='line_item_operation,line_item_line_item_description', help='Column name in report file that contains tags')
 parser.add_argument('--billing_dataset_name', type=str, default='', help='Name of GCP billing dataset (in BigQuery service)')
+
+parser.add_argument('--mongo_host', type=str, default='localhost', help='Mongo DB host')
+parser.add_argument('--mongo_port', type=str, default='27017', help='Mongo DB port')
+parser.add_argument('--service_base_name', type=str, help='Service Base Name')
+parser.add_argument('--os_user', type=str, help='Dlab user')
+parser.add_argument('--keystore_password', type=str, help='Keystore password')
+parser.add_argument('--keycloak_client_id', type=str, help='Keycloak client id')
+parser.add_argument('--keycloak_client_secret', type=str, help='Keycloak client secret')
+parser.add_argument('--keycloak_auth_server_url', type=str, help='Keycloak auth server url')
 args = parser.parse_args()
 
 
@@ -70,38 +79,76 @@
         if args.cloud_provider == 'aws':
             if args.aws_job_enabled == 'true':
                 args.tag_resource_id = 'resourceTags' + ':' + args.tag_resource_id
-            config_orig = config_orig.replace('<BILLING_BUCKET_NAME>', args.billing_bucket)
-            config_orig = config_orig.replace('<AWS_JOB_ENABLED>', args.aws_job_enabled)
-            config_orig = config_orig.replace('<REPORT_PATH>', args.report_path)
-            config_orig = config_orig.replace('<ACCOUNT_ID>', args.account_id)
-            config_orig = config_orig.replace('<ACCESS_KEY_ID>', args.access_key_id)
-            config_orig = config_orig.replace('<SECRET_ACCESS_KEY>', args.secret_access_key)
-            config_orig = config_orig.replace('<CONF_BILLING_TAG>', args.billing_tag)
-            config_orig = config_orig.replace('<CONF_SERVICE_BASE_NAME>', args.infrastructure_tag)
-            config_orig = config_orig.replace('<MONGODB_PASSWORD>', args.mongo_password)
-            config_orig = config_orig.replace('<DLAB_ID>', args.dlab_id)
-            config_orig = config_orig.replace('<USAGE_DATE>', args.usage_date)
-            config_orig = config_orig.replace('<PRODUCT>', args.product)
-            config_orig = config_orig.replace('<USAGE_TYPE>', args.usage_type)
-            config_orig = config_orig.replace('<USAGE>', args.usage)
-            config_orig = config_orig.replace('<COST>', args.cost)
-            config_orig = config_orig.replace('<RESOURCE_ID>', args.resource_id)
-            config_orig = config_orig.replace('<TAGS>', args.tags)
+            config_orig = config_orig.replace('MONGO_HOST', args.mongo_host)
+            config_orig = config_orig.replace('MONGO_PASSWORD', args.mongo_password)
+            config_orig = config_orig.replace('MONGO_PORT', args.mongo_port)
+            config_orig = config_orig.replace('BILLING_BUCKET_NAME', args.billing_bucket)
+            config_orig = config_orig.replace('REPORT_PATH', args.report_path)
+            config_orig = config_orig.replace('AWS_JOB_ENABLED', args.aws_job_enabled)
+            config_orig = config_orig.replace('ACCOUNT_ID', args.account_id)
+            config_orig = config_orig.replace('ACCESS_KEY_ID', args.access_key_id)
+            config_orig = config_orig.replace('SECRET_ACCESS_KEY', args.secret_access_key)
+            config_orig = config_orig.replace('CONF_BILLING_TAG', args.billing_tag)
+            config_orig = config_orig.replace('SERVICE_BASE_NAME', args.service_base_name)
+            config_orig = config_orig.replace('DLAB_ID', args.dlab_id)
+            config_orig = config_orig.replace('USAGE_DATE', args.usage_date)
+            config_orig = config_orig.replace('PRODUCT', args.product)
+            config_orig = config_orig.replace('USAGE_TYPE', args.usage_type)
+            config_orig = config_orig.replace('USAGE', args.usage)
+            config_orig = config_orig.replace('COST', args.cost)
+            config_orig = config_orig.replace('RESOURCE_ID', args.resource_id)
+            config_orig = config_orig.replace('TAGS', args.tags)
         elif args.cloud_provider == 'azure':
-            config_orig = config_orig.replace('<CLIENT_ID>', args.client_id)
-            config_orig = config_orig.replace('<CLIENT_SECRET>', args.client_secret)
-            config_orig = config_orig.replace('<TENANT_ID>', args.tenant_id)
-            config_orig = config_orig.replace('<SUBSCRIPTION_ID>', args.subscription_id)
-            config_orig = config_orig.replace('<AUTHENTICATION_FILE>', args.authentication_file)
-            config_orig = config_orig.replace('<OFFER_NUMBER>', args.offer_number)
-            config_orig = config_orig.replace('<CURRENCY>', args.currency)
-            config_orig = config_orig.replace('<LOCALE>', args.locale)
-            config_orig = config_orig.replace('<REGION_INFO>', args.region_info)
-            config_orig = config_orig.replace('<MONGODB_PASSWORD>', args.mongo_password)
+            config_orig = config_orig.replace('SERVICE_BASE_NAME', args.service_base_name)
+            config_orig = config_orig.replace('OS_USER', args.os_user)
+            config_orig = config_orig.replace('MONGO_PASSWORD', args.mongo_password)
+            config_orig = config_orig.replace('MONGO_PORT', args.mongo_port)
+            config_orig = config_orig.replace('MONGO_HOST', args.mongo_host)
+            config_orig = config_orig.replace('KEY_STORE_PASSWORD', args.keystore_password)
+            config_orig = config_orig.replace('KEYCLOAK_CLIENT_ID', args.keycloak_client_id)
+            config_orig = config_orig.replace('KEYCLOAK_CLIENT_SECRET', args.keycloak_client_secret)
+            config_orig = config_orig.replace('KEYCLOAK_AUTH_SERVER_URL', args.keycloak_auth_server_url)
+            config_orig = config_orig.replace('CLIENT_ID', args.client_id)
+            config_orig = config_orig.replace('CLIENT_SECRET', args.client_secret)
+            config_orig = config_orig.replace('TENANT_ID', args.tenant_id)
+            config_orig = config_orig.replace('SUBSCRIPTION_ID', args.subscription_id)
+            config_orig = config_orig.replace('AUTHENTICATION_FILE', args.authentication_file)
+            config_orig = config_orig.replace('OFFER_NUMBER', args.offer_number)
+            config_orig = config_orig.replace('CURRENCY', args.currency)
+            config_orig = config_orig.replace('LOCALE', args.locale)
+            config_orig = config_orig.replace('REGION_INFO', args.region_info)
         elif args.cloud_provider == 'gcp':
-            config_orig = config_orig.replace('<CONF_SERVICE_BASE_NAME>', args.infrastructure_tag)
-            config_orig = config_orig.replace('<MONGO_PASSWORD>', args.mongo_password)
-            config_orig = config_orig.replace('<BILLING_DATASET_NAME>', args.billing_dataset_name)
+            config_orig = config_orig.replace('SERVICE_BASE_NAME', args.service_base_name)
+            config_orig = config_orig.replace('OS_USER', args.os_user)
+            config_orig = config_orig.replace('MONGO_PASSWORD', args.mongo_password)
+            config_orig = config_orig.replace('MONGO_PORT', args.mongo_port)
+            config_orig = config_orig.replace('MONGO_HOST', args.mongo_host)
+            config_orig = config_orig.replace('KEY_STORE_PASSWORD', args.keystore_password)
+            config_orig = config_orig.replace('DATASET_NAME', args.billing_dataset_name)
+            config_orig = config_orig.replace('KEYCLOAK_CLIENT_ID', args.keycloak_client_id)
+            config_orig = config_orig.replace('KEYCLOAK_CLIENT_SECRET', args.keycloak_client_secret)
+            config_orig = config_orig.replace('KEYCLOAK_AUTH_SERVER_URL', args.keycloak_auth_server_url)
+        f = open(path, 'w')
+        f.write(config_orig)
+        f.close()
+    except Exception as err:
+        print("Could not write the target file {}: {}".format(path, str(err)))
+        sys.exit(1)
+
+def yml_billing_app(path):
+    try:
+        with open(path, 'r') as config_yml_r:
+            config_orig = config_yml_r.read()
+
+        config_orig = config_orig.replace('MONGO_HOST', args.mongo_host)
+        config_orig = config_orig.replace('MONGO_PASSWORD', args.mongo_password)
+        config_orig = config_orig.replace('MONGO_PORT', args.mongo_port)
+        config_orig = config_orig.replace('OS_USER', args.os_user)
+        config_orig = config_orig.replace('KEY_STORE_PASSWORD', args.keystore_password)
+        config_orig = config_orig.replace('KEYCLOAK_CLIENT_ID', args.keycloak_client_id)
+        config_orig = config_orig.replace('KEYCLOAK_CLIENT_SECRET', args.keycloak_client_secret)
+        config_orig = config_orig.replace('KEYCLOAK_AUTH_SERVER_URL', args.keycloak_auth_server_url)
+
         f = open(path, 'w')
         f.write(config_orig)
         f.close()
@@ -134,6 +181,8 @@
     # Access to the bucket without credentials?
     try:
         yml_billing(args.dlab_dir + 'conf/billing.yml')
+        if args.cloud_provider == 'aws':
+            yml_billing_app(args.dlab_dir + 'conf/billing_app.yml')
         yml_self_service(args.dlab_dir + 'conf/self-service.yml')
     except:
        print('Error configuring billing')
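
One detail worth noting in the replace chains above: str.replace is applied placeholder by
placeholder, so a placeholder that is a prefix of another ('USAGE' versus 'USAGE_DATE' and
'USAGE_TYPE') must be replaced after its longer variants, which the AWS branch respects. A
hypothetical order-safe helper that makes the rule explicit by sorting keys longest-first:

    # Not part of the patch: apply replacements longest key first so
    # 'USAGE' cannot clobber 'USAGE_DATE' or 'USAGE_TYPE'.
    def apply_placeholders(text, mapping):
        for key in sorted(mapping, key=len, reverse=True):
            text = text.replace(key, mapping[key])
        return text
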
diff --git a/infrastructure-provisioning/src/ssn/scripts/configure_docker.py b/infrastructure-provisioning/src/ssn/scripts/configure_docker.py
index 7176c8b..727f97e 100644
--- a/infrastructure-provisioning/src/ssn/scripts/configure_docker.py
+++ b/infrastructure-provisioning/src/ssn/scripts/configure_docker.py
@@ -55,6 +55,7 @@
     sudo("python /tmp/configure_conf_file.py --dlab_dir {} --variables_list '{}'".format(
         args.dlab_path, json.dumps(variables_list)))
 
+
 def download_toree():
     toree_path = '/opt/dlab/sources/infrastructure-provisioning/src/general/files/os/'
     tarball_link = 'https://archive.apache.org/dist/incubator/toree/0.2.0-incubating/toree/toree-0.2.0-incubating-bin.tar.gz'
@@ -69,6 +70,7 @@
         print('Failed to download toree: ', str(err))
         sys.exit(1)
 
+
 def add_china_repository(dlab_path):
     with cd('{}sources/infrastructure-provisioning/src/base/'.format(dlab_path)):
         sudo('sed -i "/pip install/s/$/ -i https\:\/\/{0}\/simple --trusted-host {0} --timeout 60000/g" '
@@ -106,6 +108,7 @@
     except:
         return False
 
+
 def configure_guacamole():
     try:
         mysql_pass = id_generator()
@@ -174,8 +177,10 @@
         sys.exit(1)
 
     print("Building dlab images")
-    if not build_docker_images(deeper_config, args.region, args.dlab_path):
-        sys.exit(1)
+    count = 0
+    while not build_docker_images(deeper_config, args.region, args.dlab_path):
+        count += 1
+        if count >= 5:
+            sys.exit(1)
+        time.sleep(5)
 
     print("Configuring guacamole")
     if not configure_guacamole():
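
The image build is now retried instead of failing on the first attempt; with the bounded loop
above it gives up after five failed builds. The same behaviour as a small reusable helper (a
sketch only, assuming time is importable here since the loop already calls time.sleep):

    import time

    def retry(fn, attempts=5, delay=5):
        # Call fn() up to `attempts` times, pausing `delay` seconds between tries.
        for _ in range(attempts):
            if fn():
                return True
            time.sleep(delay)
        return False

    if not retry(lambda: build_docker_images(deeper_config, args.region, args.dlab_path)):
        sys.exit(1)
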
diff --git a/infrastructure-provisioning/src/ssn/scripts/configure_ssn_node.py b/infrastructure-provisioning/src/ssn/scripts/configure_ssn_node.py
index d5c454f..42a6a09 100644
--- a/infrastructure-provisioning/src/ssn/scripts/configure_ssn_node.py
+++ b/infrastructure-provisioning/src/ssn/scripts/configure_ssn_node.py
@@ -27,6 +27,7 @@
 import sys
 import os
 from dlab.ssn_lib import *
+from dlab.common_lib import *
 from dlab.fab import *
 import traceback
 
@@ -37,6 +38,7 @@
 parser.add_argument('--os_user', type=str, default='')
 parser.add_argument('--dlab_path', type=str, default='')
 parser.add_argument('--tag_resource_id', type=str, default='')
+parser.add_argument('--step_cert_sans', type=str, default='')
 args = parser.parse_args()
 
 
@@ -128,8 +130,59 @@
             sudo('mv dlab.crt /etc/ssl/certs/dlab.crt')
             sudo('mv dlab.key /etc/ssl/certs/dlab.key')
         else:
-            sudo('openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout /etc/ssl/certs/dlab.key \
-                 -out /etc/ssl/certs/dlab.crt -subj "/C=US/ST=US/L=US/O=dlab/CN={}"'.format(hostname))
+            if os.environ['conf_stepcerts_enabled'] == 'true':
+                ensure_step(args.os_user)
+                sudo('mkdir -p /home/{0}/keys'.format(args.os_user))
+                sudo('''bash -c 'echo "{0}" | base64 --decode > /etc/ssl/certs/root_ca.crt' '''.format(
+                     os.environ['conf_stepcerts_root_ca']))
+                fingerprint = sudo('step certificate fingerprint /etc/ssl/certs/root_ca.crt')
+                sudo('step ca bootstrap --fingerprint {0} --ca-url "{1}"'.format(fingerprint,
+                                                                                 os.environ['conf_stepcerts_ca_url']))
+                sudo('echo "{0}" > /home/{1}/keys/provisioner_password'.format(
+                     os.environ['conf_stepcerts_kid_password'], args.os_user))
+                sans = "--san localhost --san 127.0.0.1 {0}".format(args.step_cert_sans)
+                cn = hostname
+                sudo('step ca token {3} --kid {0} --ca-url "{1}" --root /etc/ssl/certs/root_ca.crt '
+                     '--password-file /home/{2}/keys/provisioner_password {4} --output-file /tmp/step_token'.format(
+                              os.environ['conf_stepcerts_kid'], os.environ['conf_stepcerts_ca_url'],
+                              args.os_user, cn, sans))
+                token = sudo('cat /tmp/step_token')
+                sudo('step ca certificate "{0}" /etc/ssl/certs/dlab.crt /etc/ssl/certs/dlab.key '
+                     '--token "{1}" --kty=RSA --size 2048 --provisioner {2} '.format(cn, token,
+                                                                                     os.environ['conf_stepcerts_kid']))
+                sudo('touch /var/log/renew_certificates.log')
+                put('/root/templates/renew_certificates.sh', '/tmp/renew_certificates.sh')
+                sudo('mv /tmp/renew_certificates.sh /usr/local/bin/')
+                sudo('chmod +x /usr/local/bin/renew_certificates.sh')
+                sudo('sed -i "s/OS_USER/{0}/g" /usr/local/bin/renew_certificates.sh'.format(args.os_user))
+                sudo('sed -i "s|JAVA_HOME|{0}|g" /usr/local/bin/renew_certificates.sh'.format(find_java_path_remote()))
+                sudo('sed -i "s|RESOURCE_TYPE|ssn|g" /usr/local/bin/renew_certificates.sh')
+                sudo('sed -i "s|CONF_FILE|ssn|g" /usr/local/bin/renew_certificates.sh')
+                put('/root/templates/manage_step_certs.sh', '/usr/local/bin/manage_step_certs.sh', use_sudo=True)
+                sudo('chmod +x /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_ROOT_CERT_PATH|/etc/ssl/certs/root_ca.crt|g" '
+                     '/usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_CERT_PATH|/etc/ssl/certs/dlab.crt|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_KEY_PATH|/etc/ssl/certs/dlab.key|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|STEP_CA_URL|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(
+                    os.environ['conf_stepcerts_ca_url']))
+                sudo('sed -i "s|RESOURCE_TYPE|ssn|g" /usr/local/bin/manage_step_certs.sh')
+                sudo('sed -i "s|SANS|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(sans))
+                sudo('sed -i "s|CN|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(cn))
+                sudo('sed -i "s|KID|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(
+                    os.environ['conf_stepcerts_kid']))
+                sudo('sed -i "s|STEP_PROVISIONER_PASSWORD_PATH|/home/{0}/keys/provisioner_password|g" '
+                     '/usr/local/bin/manage_step_certs.sh'.format(args.os_user))
+                sudo('bash -c \'echo "0 * * * * root /usr/local/bin/manage_step_certs.sh >> '
+                     '/var/log/renew_certificates.log 2>&1" >> /etc/crontab \'')
+                put('/root/templates/step-cert-manager.service', '/etc/systemd/system/step-cert-manager.service',
+                    use_sudo=True)
+                sudo('systemctl daemon-reload')
+                sudo('systemctl enable step-cert-manager.service')
+
+            else:
+                sudo('openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout /etc/ssl/certs/dlab.key \
+                     -out /etc/ssl/certs/dlab.crt -subj "/C=US/ST=US/L=US/O=dlab/CN={}"'.format(hostname))
         sudo('openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048')
     except Exception as err:
         traceback.print_exc()
@@ -149,6 +202,8 @@
 ##############
 # Run script #
 ##############
+
+
 if __name__ == "__main__":
     print("Configure connections")
     try:
@@ -179,6 +234,9 @@
     print("Installing nginx as frontend.")
     ensure_nginx(args.dlab_path)
 
+    print("Installing Java")
+    ensure_java(args.os_user)
+
     print("Configuring ssl key and cert for nginx.")
     configure_ssl_certs(args.hostname, custom_ssl_cert)
 
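
The step-certificates branch configures manage_step_certs.sh through a long run of sed calls,
one per placeholder. Functionally that is a single mapping applied to one file; a condensed
sketch of the same substitutions (sudo comes from fabric, as in the script above):

    substitutions = {
        'STEP_ROOT_CERT_PATH': '/etc/ssl/certs/root_ca.crt',
        'STEP_CERT_PATH': '/etc/ssl/certs/dlab.crt',
        'STEP_KEY_PATH': '/etc/ssl/certs/dlab.key',
        'STEP_CA_URL': os.environ['conf_stepcerts_ca_url'],
        'RESOURCE_TYPE': 'ssn',
        'SANS': sans,
        'CN': cn,
        'KID': os.environ['conf_stepcerts_kid'],
        'STEP_PROVISIONER_PASSWORD_PATH': '/home/{0}/keys/provisioner_password'.format(args.os_user),
    }
    for placeholder, value in substitutions.items():
        sudo('sed -i "s|{0}|{1}|g" /usr/local/bin/manage_step_certs.sh'.format(placeholder, value))
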
diff --git a/infrastructure-provisioning/src/ssn/scripts/configure_ui.py b/infrastructure-provisioning/src/ssn/scripts/configure_ui.py
index 8e1e56f..2e3cd85 100644
--- a/infrastructure-provisioning/src/ssn/scripts/configure_ui.py
+++ b/infrastructure-provisioning/src/ssn/scripts/configure_ui.py
@@ -72,6 +72,9 @@
 parser.add_argument('--cost', type=str, default=None)
 parser.add_argument('--resource_id', type=str, default=None)
 parser.add_argument('--tags', type=str, default=None)
+parser.add_argument('--keycloak_client_id', type=str, default=None)
+parser.add_argument('--keycloak_client_secret', type=str, default=None)
+parser.add_argument('--keycloak_auth_server_url', type=str, default=None)
 args = parser.parse_args()
 
 dlab_conf_dir = args.dlab_path + 'conf/'
@@ -127,6 +130,8 @@
                                                                                              env.host_string))
         sudo('mv /tmp/mongo_roles.json ' + args.dlab_path + 'tmp/')
         sudo('sed -i "s|DEF_ENDPOINT_NAME|{0}|g" /tmp/local_endpoint.json'.format(default_endpoint_name))
+        sudo('sed -i "s|CLOUD_PROVIDER|{0}|g" /tmp/local_endpoint.json'.format(
+            os.environ['conf_cloud_provider'].upper()))
         sudo('mv /tmp/local_endpoint.json ' + args.dlab_path + 'tmp/')
         sudo("python " + args.dlab_path + "tmp/configure_mongo.py --dlab_path {} ".format(
             args.dlab_path))
@@ -147,7 +152,7 @@
                      '\'use_ldap\': false'))
 
             sudo('echo "N" | npm install')
-            sudo('npm run build.prod')
+            manage_npm_pkg('run build.prod')
             sudo('sudo chown -R {} {}/*'.format(args.os_user, args.dlab_path))
 
         # Building Back-end
@@ -173,6 +178,8 @@
                 args.dlab_path))
         elif args.cloud_provider == 'aws':
             sudo('cp {0}/sources/services/billing-aws/billing.yml {0}/webapp/billing/conf/'.format(args.dlab_path))
+            sudo('cp {0}/sources/services/billing-aws/src/main/resources/application.yml '
+                 '{0}/webapp/billing/conf/billing_app.yml'.format(args.dlab_path))
             sudo(
                 'cp {0}/sources/services/billing-aws/target/billing-aws*.jar {0}/webapp/billing/lib/'.format(
                     args.dlab_path))
@@ -230,4 +237,5 @@
              args.region_info, args.ldap_login, args.tenant_id, args.application_id,
              args.hostname, args.datalake_store_name, args.subscription_id, args.validate_permission_scope,
              args.dlab_id, args.usage_date, args.product, args.usage_type,
-             args.usage, args.cost, args.resource_id, args.tags, args.billing_dataset_name)
+             args.usage, args.cost, args.resource_id, args.tags, args.billing_dataset_name, args.keycloak_client_id,
+             args.keycloak_client_secret, args.keycloak_auth_server_url)
diff --git a/infrastructure-provisioning/src/ssn/scripts/docker_build.py b/infrastructure-provisioning/src/ssn/scripts/docker_build.py
index 50f8463..ac4fee5 100644
--- a/infrastructure-provisioning/src/ssn/scripts/docker_build.py
+++ b/infrastructure-provisioning/src/ssn/scripts/docker_build.py
@@ -34,12 +34,15 @@
             'edge',
             'project',
             'jupyter',
+            'jupyterlab',
             'rstudio',
             'zeppelin',
             'tensor',
             'tensor-rstudio',
             'deeplearning',
-            'dataengine'
+            'dataengine',
+            'dataengine-service',
+            'superset'
             ]
 else:
     node = sys.argv[1:]
diff --git a/infrastructure-provisioning/src/ssn/templates/nginx_proxy.conf b/infrastructure-provisioning/src/ssn/templates/nginx_proxy.conf
index 280e52c..3a293fc 100644
--- a/infrastructure-provisioning/src/ssn/templates/nginx_proxy.conf
+++ b/infrastructure-provisioning/src/ssn/templates/nginx_proxy.conf
@@ -23,14 +23,17 @@
 
     listen 80;
     server_name  SSN_HOSTNAME;
-    # rewrite ^/(.*)$ https://$server_name/$1 permanent;
     proxy_buffering off;
     include locations/proxy_location_*.conf;
+    rewrite ^/(.*)$ https://$server_name/$1 permanent;
 }
 
 server {
     listen       443 ssl;
     server_name  SSN_HOSTNAME;
+    if ($host != $server_name) {
+        rewrite ^/(.*)$ https://$server_name/$1 redirect;
+    }
     # SSL section
     proxy_buffering off;
     ssl on;
@@ -40,7 +43,7 @@
     ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
     ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';
     ssl_prefer_server_ciphers on;
-    ssl_dhparam /etc/ssl/certs/dhparam.pem;
+    # ssl_dhparam /etc/ssl/certs/dhparam.pem;
 
     include locations/proxy_location_*.conf;
 }
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/ssn/templates/ssn.yml b/infrastructure-provisioning/src/ssn/templates/ssn.yml
index bed2bf6..7b18d26 100644
--- a/infrastructure-provisioning/src/ssn/templates/ssn.yml
+++ b/infrastructure-provisioning/src/ssn/templates/ssn.yml
@@ -21,7 +21,7 @@
 
 <#assign LOG_ROOT_DIR="/var/opt/dlab/log">
 <#assign KEYS_DIR="/home/${sys['user.name']}/keys">
-<#assign KEY_STORE_PATH="${KEYS_DIR}/dlab.keystore.jks">
+<#assign KEY_STORE_PATH="${KEYS_DIR}/ssn.keystore.jks">
 <#assign KEY_STORE_PASSWORD="KEYSTORE_PASSWORD">
 <#assign TRUST_STORE_PATH="${JRE_HOME}/lib/security/cacerts">
 <#assign TRUST_STORE_PASSWORD="changeit">
@@ -62,5 +62,10 @@
     timeout: 3s
     connectionTimeout: 3s
 
+billingService:
+  jerseyClient:
+    timeout: 4m
+    connectionTimeout: 3s
+
 # Log out user on inactivity
 inactiveUserTimeoutMillSec: 7200000
diff --git a/infrastructure-provisioning/src/ssn/templates/ssn_nginx.conf b/infrastructure-provisioning/src/ssn/templates/ssn_nginx.conf
index dd9d035..74f58db 100644
--- a/infrastructure-provisioning/src/ssn/templates/ssn_nginx.conf
+++ b/infrastructure-provisioning/src/ssn/templates/ssn_nginx.conf
@@ -43,6 +43,7 @@
     proxy_buffering     off;
     keepalive_timeout   65;
     types_hash_max_size 2048;
+    client_max_body_size 50M;
 
     include             /etc/nginx/mime.types;
     default_type        application/octet-stream;
diff --git a/infrastructure-provisioning/src/superset/fabfile.py b/infrastructure-provisioning/src/superset/fabfile.py
new file mode 100644
index 0000000..ec1e6f0
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/fabfile.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import logging
+import json
+import sys
+from dlab.fab import *
+from dlab.meta_lib import *
+from dlab.actions_lib import *
+import os
+import uuid
+
+
+# Main function for provisioning notebook server
+def run():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    notebook_config = dict()
+    notebook_config['uuid'] = str(uuid.uuid4())[:5]
+
+    try:
+        params = "--uuid {}".format(notebook_config['uuid'])
+        local("~/scripts/{}.py {}".format('common_prepare_notebook', params))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed preparing Notebook node.", str(err))
+        sys.exit(1)
+
+    try:
+        params = "--uuid {}".format(notebook_config['uuid'])
+        local("~/scripts/{}.py {}".format('superset_configure', params))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed configuring Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for terminating exploratory environment
+def terminate():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        local("~/scripts/{}.py".format('common_terminate_notebook'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed terminating Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for stopping notebook server
+def stop():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+    try:
+        local("~/scripts/{}.py".format('common_stop_notebook'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed stopping Notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for starting notebook server
+def start():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('common_start_notebook'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed starting Notebook node.", str(err))
+        sys.exit(1)
+
+# Main function for manage git credentials on notebook
+def git_creds():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('notebook_git_creds'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to manage git credentials for notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for creating image from notebook
+def create_image():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('common_create_notebook_image'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to create image from notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for deleting existing notebook image
+def terminate_image():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('common_terminate_notebook_image'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to create image from notebook node.", str(err))
+        sys.exit(1)
+
+
+# Main function for checking inactivity status
+def check_inactivity():
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
+                        level=logging.DEBUG,
+                        filename=local_log_filepath)
+
+    try:
+        local("~/scripts/{}.py".format('notebook_inactivity_check'))
+    except Exception as err:
+        traceback.print_exc()
+        append_result("Failed to check inactivity status.", str(err))
+        sys.exit(1)
\ No newline at end of file
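
Each action in this fabfile repeats the same boilerplate: configure logging, run a helper script,
and append a result and exit on failure. A shared helper (not part of the patch) would express
the pattern once; stop(), start() and the rest then reduce to one-liners:

    def run_action(script_name, error_message, params=''):
        local_log_filename = "{}_{}_{}.log".format(
            os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
        logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                            level=logging.DEBUG,
                            filename="/logs/{}/{}".format(os.environ['conf_resource'], local_log_filename))
        try:
            local("~/scripts/{}.py {}".format(script_name, params))
        except Exception as err:
            traceback.print_exc()
            append_result(error_message, str(err))
            sys.exit(1)

    def stop():
        run_action('common_stop_notebook', "Failed stopping Notebook node.")
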
diff --git a/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py b/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
new file mode 100644
index 0000000..7a1a359
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import argparse
+import json
+import sys
+from dlab.notebook_lib import *
+from dlab.actions_lib import *
+from dlab.fab import *
+import os
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--hostname', type=str, default='')
+parser.add_argument('--keyfile', type=str, default='')
+parser.add_argument('--region', type=str, default='')
+parser.add_argument('--os_user', type=str, default='')
+parser.add_argument('--dlab_path', type=str, default='')
+parser.add_argument('--keycloak_auth_server_url', type=str, default='')
+parser.add_argument('--keycloak_realm_name', type=str, default='')
+parser.add_argument('--keycloak_client_id', type=str, default='')
+parser.add_argument('--keycloak_client_secret', type=str, default='')
+parser.add_argument('--edge_instance_private_ip', type=str, default='')
+parser.add_argument('--edge_instance_public_ip', type=str, default='')
+parser.add_argument('--superset_name', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
+args = parser.parse_args()
+
+gitlab_certfile = os.environ['conf_gitlab_certfile']
+
+##############
+# Run script #
+##############
+if __name__ == "__main__":
+    print("Configure connections")
+    env['connection_attempts'] = 100
+    env.key_filename = [args.keyfile]
+    env.host_string = args.os_user + '@' + args.hostname
+
+    # PREPARE DISK
+    print("Prepare .ensure directory")
+    try:
+        if not exists('/home/' + args.os_user + '/.ensure_dir'):
+            sudo('mkdir /home/' + args.os_user + '/.ensure_dir')
+    except:
+        sys.exit(1)
+    #print("Mount additional volume")
+    #prepare_disk(args.os_user)
+
+    # INSTALL DOCKER COMPOSE
+    print("Installing docker compose")
+    if not ensure_docker_compose(args.os_user):
+        sys.exit(1)
+
+    # INSTALL UNGIT
+    print("Install nodejs")
+    install_nodejs(args.os_user)
+    print("Install ungit")
+    install_ungit(args.os_user, args.superset_name, args.edge_instance_private_ip)
+    if exists('/home/{0}/{1}'.format(args.os_user, gitlab_certfile)):
+        install_gitlab_cert(args.os_user, gitlab_certfile)
+
+    # INSTALL INACTIVITY CHECKER
+    print("Install inactivity checker")
+    install_inactivity_checker(args.os_user, args.ip_address)
+
+    # PREPARE SUPERSET
+    try:
+        configure_superset(args.os_user, args.keycloak_auth_server_url, args.keycloak_realm_name,
+                           args.keycloak_client_id, args.keycloak_client_secret, args.edge_instance_private_ip,
+                           args.edge_instance_public_ip, args.superset_name)
+    except Exception as err:
+        print("Failed to configure Superset: " + str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/superset/templates/.env b/infrastructure-provisioning/src/superset/templates/.env
new file mode 100644
index 0000000..2c8acf4
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/templates/.env
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+COMPOSE_PROJECT_NAME=superset
+GOOGLE_APPLICATION_CREDENTIALS=/home/superset/service_account.json
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/superset/templates/docker-compose.yml b/infrastructure-provisioning/src/superset/templates/docker-compose.yml
new file mode 100644
index 0000000..78283cb
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/templates/docker-compose.yml
@@ -0,0 +1,77 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: '2'
+services:
+  redis:
+    image: redis:3.2
+    restart: unless-stopped
+    ports:
+      - "127.0.0.1:6379:6379"
+    volumes:
+      - redis:/data
+
+  postgres:
+    image: postgres:10
+    restart: unless-stopped
+    environment:
+      POSTGRES_DB: superset
+      POSTGRES_PASSWORD: superset
+      POSTGRES_USER: superset
+    ports:
+      - "127.0.0.1:5432:5432"
+    volumes:
+      - postgres:/var/lib/postgresql/data
+
+  superset:
+    build:
+      context: ../../
+      dockerfile: contrib/docker/Dockerfile
+    restart: unless-stopped
+    environment:
+      POSTGRES_DB: superset
+      POSTGRES_USER: superset
+      POSTGRES_PASSWORD: superset
+      POSTGRES_HOST: postgres
+      POSTGRES_PORT: 5432
+      REDIS_HOST: redis
+      REDIS_PORT: 6379
+      USERNAME_OIDC_FIELD: preferred_username
+      FIRST_NAME_OIDC_FIELD: given_name
+      LAST_NAME_OIDC_FIELD: family_name
+      http_proxy: http://PROXY_STRING
+      https_proxy: http://PROXY_STRING
+      # If using production, comment development volume below
+      SUPERSET_ENV: production
+      #SUPERSET_ENV: development
+    user: root:root
+    ports:
+      - 8088:8088
+    depends_on:
+      - postgres
+      - redis
+    volumes:
+      # this is needed to communicate with the postgres and redis services
+      - ./superset_config.py:/home/superset/superset/superset_config.py
+      # this is needed for development, remove with SUPERSET_ENV=production
+      #- ../../superset:/home/superset/superset
+      - ./id_provider.json:/home/superset/superset/id_provider.json
+
+volumes:
+  postgres:
+    external: false
+  redis:
+    external: false
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/superset/templates/docker-init.sh b/infrastructure-provisioning/src/superset/templates/docker-init.sh
new file mode 100644
index 0000000..11b682a
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/templates/docker-init.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+set -ex
+
+# Create an admin user (you will be prompted to set username, first and last name before setting a password)
+export FLASK_APP=superset:app
+#flask fab create-admin --username admin --firstname admin --lastname admin --password admin
+
+# Initialize the database
+superset db upgrade
+
+if [ "$SUPERSET_LOAD_EXAMPLES" = "yes" ]; then
+    # Load some data to play with
+    superset load_examples
+fi
+
+# Create default roles and permissions
+superset init
diff --git a/infrastructure-provisioning/src/superset/templates/id_provider.json b/infrastructure-provisioning/src/superset/templates/id_provider.json
new file mode 100644
index 0000000..0269079
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/templates/id_provider.json
@@ -0,0 +1,12 @@
+{
+    "web": {
+        "issuer": "KEYCLOAK_AUTH_SERVER_URL/realms/KEYCLOAK_REALM_NAME",
+        "auth_uri": "KEYCLOAK_AUTH_SERVER_URL/realms/KEYCLOAK_REALM_NAME/protocol/openid-connect/auth",
+        "client_id": "CLIENT_ID",
+        "client_secret": "CLIENT_SECRET",
+        "token_uri": "KEYCLOAK_AUTH_SERVER_URL/realms/KEYCLOAK_REALM_NAME/protocol/openid-connect/token",
+        "token_introspection_uri": "KEYCLOAK_AUTH_SERVER_URL/realms/KEYCLOAK_REALM_NAME/protocol/openid-connect/token/introspect",
+        "userinfo_uri": "KEYCLOAK_AUTH_SERVER_URL/realms/KEYCLOAK_REALM_NAME/protocol/openid-connect/userinfo",
+        "ssl-required": "none"
+    }
+}
diff --git a/infrastructure-provisioning/src/superset/templates/requirements-extra.txt b/infrastructure-provisioning/src/superset/templates/requirements-extra.txt
new file mode 100644
index 0000000..6b6dcd7
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/templates/requirements-extra.txt
@@ -0,0 +1,19 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+gevent==1.4.0
+fab-oidc
+pybigquery
diff --git a/infrastructure-provisioning/src/superset/templates/superset_config.py b/infrastructure-provisioning/src/superset/templates/superset_config.py
new file mode 100644
index 0000000..b8830af
--- /dev/null
+++ b/infrastructure-provisioning/src/superset/templates/superset_config.py
@@ -0,0 +1,69 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import os
+from fab_oidc.security import SupersetOIDCSecurityManager
+from flask_appbuilder.security.manager import AUTH_OID
+
+def get_env_variable(var_name, default=None):
+    """Get the environment variable or raise exception."""
+    try:
+        return os.environ[var_name]
+    except KeyError:
+        if default is not None:
+            return default
+        else:
+            error_msg = 'The environment variable {} was missing, abort...'\
+                        .format(var_name)
+            raise EnvironmentError(error_msg)
+
+
+POSTGRES_USER = get_env_variable('POSTGRES_USER')
+POSTGRES_PASSWORD = get_env_variable('POSTGRES_PASSWORD')
+POSTGRES_HOST = get_env_variable('POSTGRES_HOST')
+POSTGRES_PORT = get_env_variable('POSTGRES_PORT')
+POSTGRES_DB = get_env_variable('POSTGRES_DB')
+
+# The SQLAlchemy connection string.
+SQLALCHEMY_DATABASE_URI = 'postgresql://%s:%s@%s:%s/%s' % (POSTGRES_USER,
+                                                           POSTGRES_PASSWORD,
+                                                           POSTGRES_HOST,
+                                                           POSTGRES_PORT,
+                                                           POSTGRES_DB)
+
+REDIS_HOST = get_env_variable('REDIS_HOST')
+REDIS_PORT = get_env_variable('REDIS_PORT')
+
+
+class CeleryConfig(object):
+    BROKER_URL = 'redis://%s:%s/0' % (REDIS_HOST, REDIS_PORT)
+    CELERY_IMPORTS = ('superset.sql_lab', )
+    CELERY_RESULT_BACKEND = 'redis://%s:%s/1' % (REDIS_HOST, REDIS_PORT)
+    CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}}
+    CELERY_TASK_PROTOCOL = 1
+
+
+CELERY_CONFIG = CeleryConfig
+
+AUTH_TYPE = AUTH_OID
+AUTH_USER_REGISTRATION = True
+AUTH_USER_REGISTRATION_ROLE = "Admin"
+CUSTOM_SECURITY_MANAGER = SupersetOIDCSecurityManager
+OIDC_CLIENT_SECRETS = '/home/superset/superset/id_provider.json'
+OIDC_COOKIE_SECURE = False
+OIDC_VALID_ISSUERS = 'KEYCLOAK_AUTH_SERVER_URL/realms/KEYCLOAK_REALM_NAME'
+WTF_CSRF_ENABLED = False
+OVERWRITE_REDIRECT_URI = 'http://EDGE_IP/SUPERSET_NAME/oidc_callback'
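get_env_variable() raises EnvironmentError for any unset variable without a default, so the five POSTGRES_* variables plus REDIS_HOST and REDIS_PORT must be exported before Superset imports this config. A minimal sketch, all values hypothetical:

    # All seven variables are mandatory; importing superset_config fails otherwise
    export POSTGRES_USER=superset POSTGRES_PASSWORD=superset
    export POSTGRES_HOST=localhost POSTGRES_PORT=5432 POSTGRES_DB=superset
    export REDIS_HOST=localhost REDIS_PORT=6379
    superset db upgrade   # now resolves SQLALCHEMY_DATABASE_URI from the values above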
diff --git a/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py b/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
index fb579ad..93d8b55 100644
--- a/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
+++ b/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
@@ -40,7 +40,7 @@
 parser.add_argument('--rstudio_pass', type=str, default='')
 parser.add_argument('--rstudio_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -131,7 +131,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # POST INSTALLATION PROCESS
     print("Updating pyOpenSSL library")
diff --git a/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py b/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
index 43e220e..b595d9e 100644
--- a/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
+++ b/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
@@ -37,7 +37,7 @@
 parser.add_argument('--keyfile', type=str, default='')
 parser.add_argument('--region', type=str, default='')
 parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -134,7 +134,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py b/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
index 457b075..fda8b1f 100644
--- a/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
+++ b/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
@@ -50,7 +50,7 @@
 parser.add_argument('--multiple_clusters', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
 parser.add_argument('--endpoint_url', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -67,7 +67,7 @@
 else:
     spark_link = "https://archive.apache.org/dist/spark/spark-" + spark_version + "/spark-" + spark_version + \
                  "-bin-hadoop" + hadoop_version + ".tgz"
-zeppelin_interpreters = "md,python,livy,shell"
+zeppelin_interpreters = "md,python,shell"
 python3_version = "3.4"
 local_spark_path = '/opt/spark/'
 templates_dir = '/root/templates/'
@@ -88,7 +88,9 @@
             sudo('cp /opt/zeppelin/conf/zeppelin-site.xml.template /opt/zeppelin/conf/zeppelin-site.xml')
             sudo('sed -i \"/# export ZEPPELIN_PID_DIR/c\export ZEPPELIN_PID_DIR=/var/run/zeppelin\" /opt/zeppelin/conf/zeppelin-env.sh')
             sudo('sed -i \"/# export ZEPPELIN_IDENT_STRING/c\export ZEPPELIN_IDENT_STRING=notebook\" /opt/zeppelin/conf/zeppelin-env.sh')
+            sudo('sed -i \"/# export ZEPPELIN_INTERPRETER_DEP_MVNREPO/c\export ZEPPELIN_INTERPRETER_DEP_MVNREPO=https://repo1.maven.org/maven2\" /opt/zeppelin/conf/zeppelin-env.sh')
             sudo('sed -i \"/# export SPARK_HOME/c\export SPARK_HOME=\/opt\/spark/\" /opt/zeppelin/conf/zeppelin-env.sh')
+            sudo('sed -i \'s/127.0.0.1/0.0.0.0/g\' /opt/zeppelin/conf/zeppelin-site.xml')
             sudo('mkdir /var/log/zeppelin')
             sudo('mkdir /var/run/zeppelin')
             sudo('ln -s /var/log/zeppelin /opt/zeppelin-' + zeppelin_version + '-bin-netinst/logs')
@@ -97,6 +99,8 @@
             sudo('chown ' + os_user + ':' + os_user + ' -R /var/run/zeppelin')
             sudo('/opt/zeppelin/bin/install-interpreter.sh --name ' + zeppelin_interpreters + ' --proxy-url $http_proxy')
             sudo('chown ' + os_user + ':' + os_user + ' -R /opt/zeppelin-' + zeppelin_version + '-bin-netinst')
+            sudo('cp /opt/zeppelin-' + zeppelin_version + '-bin-netinst/interpreter/md/zeppelin-markdown-*.jar /opt/zeppelin/lib/interpreter/') # necessary because executing a paragraph launches a java process with "-cp :/opt/zeppelin/lib/interpreter/*:"
+            sudo('cp /opt/zeppelin-' + zeppelin_version + '-bin-netinst/interpreter/shell/zeppelin-shell-*.jar /opt/zeppelin/lib/interpreter/')
         except:
             sys.exit(1)
         try:
@@ -252,7 +256,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     if os.environ['notebook_r_enabled'] == 'true':
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/main/main.tf b/infrastructure-provisioning/terraform/aws/computational_resources/main/main.tf
new file mode 100644
index 0000000..c8bea60
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/main/main.tf
@@ -0,0 +1,118 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+provider "aws" {
+  access_key = var.access_key_id
+  secret_key = var.secret_access_key
+  region     = var.region
+}
+
+module "common" {
+  source        = "../modules/common"
+  sbn           = var.service_base_name
+  project_name  = var.project_name
+  project_tag   = var.project_tag
+  endpoint_tag  = var.endpoint_tag
+  user_tag      = var.user_tag
+  custom_tag    = var.custom_tag
+  notebook_name = var.notebook_name
+  region        = var.region
+  zone          = var.zone
+  product       = var.product_name
+  vpc           = var.vpc_id
+  cidr_range    = var.cidr_range
+  traefik_cidr  = var.traefik_cidr
+  instance_type = var.instance_type
+}
+
+module "notebook" {
+  source           = "../modules/notebook"
+  sbn              = var.service_base_name
+  project_name     = var.project_name
+  project_tag      = var.project_tag
+  endpoint_tag     = var.endpoint_tag
+  user_tag         = var.user_tag
+  custom_tag       = var.custom_tag
+  notebook_name    = var.notebook_name
+  subnet_id        = var.subnet_id
+  nb-sg_id         = var.nb-sg_id
+  iam_profile_name = var.iam_profile_name
+  product          = var.product_name
+  ami              = var.ami
+  instance_type    = var.instance_type
+  key_name         = var.key_name
+}
+
+module "data_engine" {
+  source           = "../modules/data_engine"
+  sbn              = var.service_base_name
+  project_name     = var.project_name
+  project_tag      = var.project_tag
+  endpoint_tag     = var.endpoint_tag
+  user_tag         = var.user_tag
+  custom_tag       = var.custom_tag
+  notebook_name    = var.notebook_name
+  subnet_id        = var.subnet_id
+  nb-sg_id         = var.nb-sg_id
+  iam_profile_name = var.iam_profile_name
+  product          = var.product_name
+  ami              = var.ami
+  instance_type    = var.instance_type
+  key_name         = var.key_name
+  cluster_name     = var.cluster_name
+  slave_count      = var.slave_count
+}
+
+module "emr" {
+  source           = "../modules/emr"
+  sbn              = var.service_base_name
+  project_name     = var.project_name
+  project_tag      = var.project_tag
+  endpoint_tag     = var.endpoint_tag
+  user_tag         = var.user_tag
+  custom_tag       = var.custom_tag
+  notebook_name    = var.notebook_name
+  subnet_id        = var.subnet_id
+  nb-sg_id         = var.nb-sg_id
+  iam_profile_name = var.iam_profile_name
+  product          = var.product_name
+  ami              = var.ami
+  emr_template     = var.emr_template
+  master_shape     = var.master_shape
+  slave_shape      = var.slave_shape
+  key_name         = var.key_name
+  cluster_name     = var.cluster_name
+  instance_count   = var.instance_count
+  bid_price        = var.bid_price
+}
+
+module "ami" {
+  source             = "../modules/ami"
+  sbn                = var.service_base_name
+  project_name       = var.project_name
+  source_instance_id = var.source_instance_id
+  project_tag        = var.project_tag
+  notebook_name      = var.notebook_name
+  product            = var.product_name
+  endpoint_tag       = var.endpoint_tag
+  user_tag           = var.user_tag
+  custom_tag         = var.custom_tag
+}
\ No newline at end of file
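This root module wires all five sub-modules together, so a plain terraform apply would try to build a notebook, both cluster types and an AMI at once. In practice a -target per resource type keeps them independent; a sketch, with every value hypothetical:

    terraform init
    # Provision only the notebook instance; other modules are targeted the same way
    terraform apply \
      -target=module.notebook \
      -var 'service_base_name=dlab' \
      -var 'project_name=proj1' \
      -var 'notebook_name=nb1' \
      -var-file=computational.tfvars   # hypothetical file for the remaining variables (see variables.tf)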
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/main/variables.tf b/infrastructure-provisioning/terraform/aws/computational_resources/main/variables.tf
new file mode 100644
index 0000000..25f322b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/main/variables.tf
@@ -0,0 +1,78 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "access_key_id" {}
+
+variable "secret_access_key" {}
+
+variable "service_base_name" {}
+
+variable "project_name" {}
+
+variable "project_tag" {}
+
+variable "endpoint_tag" {}
+
+variable "user_tag" {}
+
+variable "custom_tag" {}
+
+variable "notebook_name" {}
+
+variable "region" {}
+
+variable "zone" {}
+
+variable "product_name" {}
+
+variable "vpc_id" {}
+
+variable "subnet_id" {}
+
+variable "nb-sg_id" {}
+
+variable "iam_profile_name" {}
+
+variable "cidr_range" {}
+
+variable "traefik_cidr" {}
+
+variable "ami" {}
+
+variable "instance_type" {}
+
+variable "key_name" {}
+
+variable "cluster_name" {}
+
+variable "slave_count" {}
+
+variable "emr_template" {}
+
+variable "master_shape" {}
+
+variable "slave_shape" {}
+
+variable "instance_count" {}
+
+variable "bid_price" {}
+
+variable "source_instance_id" {}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
similarity index 61%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
index 951fdd7..aac7afb 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
@@ -19,22 +19,22 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+locals {
+  ami_name = "${var.sbn}-ami"
+}
 
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+resource "aws_ami_from_instance" "ami" {
+  name               = "${var.project_tag}-${var.notebook_name}-ami"
+  source_instance_id = var.source_instance_id
+  tags = {
+    Name             = local.ami_name
+    "${var.sbn}-tag" = local.ami_name
+    Product          = var.product
+    Project_name     = var.project_name
+    Project_tag      = var.project_tag
+    Endpoint_tag     = var.endpoint_tag
+    "user:tag"       = "${var.sbn}:${local.ami_name}"
+    User_tag         = var.user_tag
+    Custom_tag       = var.custom_tag
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
new file mode 100644
index 0000000..259bb6c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
@@ -0,0 +1,118 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  role_name    = "${var.sbn}-nb-de-Role"
+  role_profile = "${var.sbn}-nb-Profile"
+  policy_name  = "${var.sbn}-strict_to_S3-Policy"
+}
+
+resource "aws_iam_role" "nb_de_role" {
+  name               = local.role_name
+  assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Action": "sts:AssumeRole",
+      "Principal": {
+        "Service": "ec2.amazonaws.com"
+      },
+      "Effect": "Allow",
+      "Sid": ""
+    }
+  ]
+}
+EOF
+
+  tags = {
+    Name             = local.role_name
+    Environment_tag  = var.sbn
+    "${var.sbn}-tag" = local.role_name
+    Product          = var.product
+    Project_name     = var.project_name
+    Project_tag      = var.project_tag
+    Endpoint_tag     = var.endpoint_tag
+    "user:tag"       = "${var.sbn}:${local.role_name}"
+    User_tag         = var.user_tag
+    Custom_tag       = var.custom_tag
+  }
+}
+
+resource "aws_iam_instance_profile" "nb_profile" {
+  name = local.role_profile
+  role = aws_iam_role.nb_de_role.name
+}
+
+resource "aws_iam_policy" "strict_S3_policy" {
+  name = local.policy_name
+  description = "Strict Bucket only policy"
+  policy = <<EOF
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": "s3:ListAllMyBuckets",
+            "Resource": "arn:aws:s3:::*"
+        },
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:ListBucket",
+                "s3:GetBucketLocation",
+                "s3:PutBucketPolicy",
+                "s3:PutEncryptionConfiguration"
+            ],
+            "Resource": [
+                "arn:aws:s3:::${var.sbn}*"
+            ]
+        },
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:HeadObject"
+            ],
+            "Resource": "arn:aws:s3:::${var.sbn}-ssn-bucket/*"
+        },
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:HeadObject",
+                "s3:PutObject",
+                "s3:GetObject",
+                "s3:DeleteObject"
+            ],
+            "Resource": [
+                "arn:aws:s3:::${var.sbn}-bucket/*",
+                "arn:aws:s3:::${var.sbn}-shared-bucket/*"
+            ]
+        }
+    ]
+}
+EOF
+}
+
+resource "aws_iam_role_policy_attachment" "strict_S3_policy-attach" {
+  role       = aws_iam_role.nb_de_role.name
+  policy_arn = aws_iam_policy.strict_S3_policy.arn
+}
\ No newline at end of file
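The role, instance profile and strict S3 policy created here can be sanity-checked after an apply with the AWS CLI; a sketch assuming a service_base_name of dlab:

    # Profile that notebook/data-engine instances boot with
    aws iam get-instance-profile --instance-profile-name dlab-nb-Profile
    # Confirm the strict S3 policy is attached to the role
    aws iam list-attached-role-policies --role-name dlab-nb-de-Role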
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
new file mode 100644
index 0000000..2b3c1fb
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
@@ -0,0 +1,80 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  subnet_name = "${var.sbn}-subnet"
+  sg_name     = "${var.sbn}-nb-sg" #sg - security group
+}
+
+resource "aws_subnet" "subnet" {
+  vpc_id     = var.vpc
+  cidr_block = var.cidr_range
+
+  tags = {
+    Name             = local.subnet_name
+    "${var.sbn}-tag" = local.subnet_name
+    Product          = var.product
+    Project_name     = var.project_name
+    Project_tag      = var.project_tag
+    Endpoint_tag     = var.endpoint_tag
+    "user:tag"       = "${var.sbn}:${local.subnet_name}"
+    User_tag         = var.user_tag
+    Custom_tag       = var.custom_tag
+  }
+}
+
+resource "aws_security_group" "nb-sg" {
+  name   = local.sg_name
+  vpc_id = var.vpc
+
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["${var.cidr_range}", "${var.traefik_cidr}"]
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 443
+    to_port     = 443
+    protocol    = "TCP"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = {
+    Name             = local.sg_name
+    "${var.sbn}-tag" = local.sg_name
+    Product          = var.product
+    Project_name     = var.project_name
+    Project_tag      = var.project_tag
+    Endpoint_tag     = var.endpoint_tag
+    "user:tag"       = "${var.sbn}:${local.sg_name}"
+    User_tag         = var.user_tag
+    Custom_tag       = var.custom_tag
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
new file mode 100644
index 0000000..12532c1
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
@@ -0,0 +1,71 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  cluster_name  = "${var.sbn}-de-${var.notebook_name}-${var.cluster_name}"
+  notebook_name = "${var.sbn}-nb-${var.notebook_name}"
+}
+
+resource "aws_instance" "master" {
+  ami                  = var.ami
+  instance_type        = var.instance_type
+  key_name             = var.key_name
+  subnet_id            = var.subnet_id
+  security_groups      = ["${var.nb-sg_id}"]
+  iam_instance_profile = var.iam_profile_name
+  tags = {
+    Name                     = "${local.cluster_name}-m"
+    Type                     = "master"
+    dataengine_notebook_name = local.notebook_name
+    "${var.sbn}-tag"         = "${local.cluster_name}-m"
+    Product                  = var.product
+    Project_name             = var.project_name
+    Project_tag              = var.project_tag
+    User_tag                 = var.user_tag
+    Endpoint_Tag             = var.endpoint_tag
+    "user:tag"               = "${var.sbn}:${local.cluster_name}"
+    Custom_Tag               = var.custom_tag
+  }
+}
+
+
+resource "aws_instance" "slave" {
+  count                = var.slave_count
+  ami                  = var.ami
+  instance_type        = var.instance_type
+  key_name             = var.key_name
+  subnet_id            = var.subnet_id
+  security_groups      = ["${var.nb-sg_id}"]
+  iam_instance_profile = var.iam_profile_name
+  tags = {
+    Name                     = "${local.cluster_name}-s${count.index + 1}"
+    Type                     = "slave"
+    dataengine_notebook_name = local.notebook_name
+    "${var.sbn}-tag"         = "${local.cluster_name}-s${count.index + 1}"
+    Product                  = var.product
+    Project_name             = var.project_name
+    Project_tag              = var.project_tag
+    User_tag                 = var.user_tag
+    Endpoint_Tag             = var.endpoint_tag
+    "user:tag"               = "${var.sbn}:${local.cluster_name}"
+    Custom_Tag               = var.custom_tag
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
new file mode 100644
index 0000000..10f5506
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
@@ -0,0 +1,82 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  cluster_name  = "${var.sbn}-des-${var.notebook_name}-${var.cluster_name}"
+  notebook_name = "${var.sbn}-nb-${var.notebook_name}"
+}
+
+resource "aws_emr_cluster" "cluster" {
+  name          = local.cluster_name
+  release_label = var.emr_template
+  applications  = ["Spark"]
+
+  termination_protection            = false
+  keep_job_flow_alive_when_no_steps = true
+
+  ec2_attributes {
+    subnet_id                         = var.subnet_id
+    emr_managed_master_security_group = var.nb-sg_id
+    emr_managed_slave_security_group  = var.nb-sg_id
+    instance_profile                  = "arn:aws:iam::203753054073:instance-profile/EMR_EC2_DefaultRole"
+  }
+
+  master_instance_group {
+    instance_type = var.master_shape
+  }
+
+  core_instance_group {
+    instance_type  = var.slave_shape
+    instance_count = "${var.instance_count - 1}"
+
+    ebs_config {
+      size                 = "40"
+      type                 = "gp2"
+      volumes_per_instance = 1
+    }
+
+    bid_price = "0.${var.bid_price}"
+  }
+
+  ebs_root_volume_size = 100
+
+  tags = {
+    ComputationalName        = var.cluster_name
+    Name                     = local.cluster_name
+    Notebook                 = local.notebook_name
+    Product                  = var.product
+    "${var.sbn}-tag"         = local.cluster_name
+    Project_name             = var.project_name
+    Project_tag              = var.project_tag
+    User_tag                 = var.user_tag
+    Endpoint_Tag             = var.endpoint_tag
+    "user:tag"               = "${var.sbn}:${local.cluster_name}"
+    Custom_Tag               = var.custom_tag
+  }
+
+  bootstrap_action {
+    path = "s3://elasticmapreduce/bootstrap-actions/run-if"
+    name = "runif"
+    args = ["instance.isMaster=true", "echo running on master node"]
+  }
+
+  service_role = "arn:aws:iam::203753054073:role/EMR_DefaultRole"
+}
\ No newline at end of file
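Note that instance_profile and service_role above are hard-coded to account 203753054073; in any other account the default EMR roles have to exist (or the ARNs be parameterized) before this cluster can launch. The stock roles can be created once per account:

    # Creates EMR_DefaultRole and EMR_EC2_DefaultRole in the calling account,
    # matching the role names referenced by the hard-coded ARNs above
    aws emr create-default-roles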
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
new file mode 100644
index 0000000..64d1d4f
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
@@ -0,0 +1,44 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  node_name = "${var.sbn}-nb-${var.notebook_name}"
+}
+
+resource "aws_instance" "notebook" {
+  ami                  = var.ami
+  instance_type        = var.instance_type
+  key_name             = var.key_name
+  subnet_id            = var.subnet_id
+  security_groups      = ["${var.nb-sg_id}"]
+  iam_instance_profile = var.iam_profile_name
+  tags = {
+    Name             = local.node_name
+    "${var.sbn}-tag" = local.node_name
+    Project_name     = var.project_name
+    Project_tag      = var.project_tag
+    Endpoint_Tag     = var.endpoint_tag
+    "user:tag"       = "${var.sbn}:${local.node_name}"
+    Product          = var.product
+    User_Tag         = var.user_tag
+    Custom_Tag       = var.custom_tag
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf
new file mode 100644
index 0000000..8a930e0
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf
@@ -0,0 +1,37 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  shared_s3_name = "${var.service_base_name}-${var.endpoint_id}-shared-bucket"
+}
+
+resource "aws_s3_bucket" "shared_bucket" {
+  bucket = local.shared_s3_name
+  acl    = "private"
+  tags   = {
+    Name                           = local.shared_s3_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.shared_s3_name}"
+    "${var.service_base_name}-tag" = local.shared_s3_name
+    "endpoint_tag"                 = var.endpoint_id
+  }
+  force_destroy = true
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
new file mode 100644
index 0000000..e4c1e69
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
@@ -0,0 +1,56 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_role_name    = "${var.service_base_name}-${var.endpoint_id}-role"
+  endpoint_role_profile = "${var.service_base_name}-${var.endpoint_id}-profile"
+  endpoint_policy_name  = "${var.service_base_name}-${var.endpoint_id}-policy"
+}
+
+data "template_file" "endpoint_policy" {
+  template = file("./files/endpoint-policy.json")
+}
+
+resource "aws_iam_role" "endpoint_role" {
+  name               = local.endpoint_role_name
+  assume_role_policy = file("./files/assume-policy.json")
+  tags = {
+    Name = local.endpoint_role_name
+    "${local.additional_tag[0]}" = local.additional_tag[1]
+    "${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_role_name}"
+    "${var.service_base_name}-tag" = local.endpoint_role_name
+  }
+}
+
+resource "aws_iam_instance_profile" "endpoint_profile" {
+  name = local.endpoint_role_profile
+  role = aws_iam_role.endpoint_role.name
+}
+
+resource "aws_iam_policy" "endpoint_policy" {
+  name   = local.endpoint_policy_name
+  policy = data.template_file.endpoint_policy.rendered
+}
+
+resource "aws_iam_role_policy_attachment" "endpoint_policy_attach" {
+  role       = aws_iam_role.endpoint_role.name
+  policy_arn = aws_iam_policy.endpoint_policy.arn
+}
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
new file mode 100644
index 0000000..6bfc09b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
@@ -0,0 +1,51 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_instance_name = "${var.service_base_name}-${var.endpoint_id}-endpoint"
+}
+
+resource "aws_instance" "endpoint" {
+  ami                  = var.ami
+  instance_type        = var.endpoint_instance_shape
+  key_name             = var.key_name
+  subnet_id            = data.aws_subnet.data_subnet.id
+  security_groups      = [aws_security_group.endpoint_sec_group.id]
+  iam_instance_profile = aws_iam_instance_profile.endpoint_profile.name
+  root_block_device {
+    volume_type           = "gp2"
+    volume_size           = var.endpoint_volume_size
+    delete_on_termination = true
+  }
+  tags = {
+    Name                           = local.endpoint_instance_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_instance_name}"
+    "${var.service_base_name}-tag" = local.endpoint_instance_name
+    endpoint_id                    = var.endpoint_id
+  }
+}
+
+resource "aws_eip_association" "e_ip_assoc" {
+  instance_id   = aws_instance.endpoint.id
+  allocation_id = aws_eip.endpoint_eip.id
+  count         = var.network_type == "public" ? 1 : 0
+}
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
new file mode 100644
index 0000000..eea071b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
@@ -0,0 +1,164 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_subnet_name       = "${var.service_base_name}-${var.endpoint_id}-subnet"
+  endpoint_sg_name           = "${var.service_base_name}-${var.endpoint_id}-sg"
+  endpoint_vpc_name          = "${var.service_base_name}-${var.endpoint_id}-vpc"
+  additional_tag             = split(":", var.additional_tag)
+  endpoint_igw_name          = "${var.service_base_name}-${var.endpoint_id}-igw"
+  endpoint_ip_name           = "${var.service_base_name}-${var.endpoint_id}-static-ip"
+  projects_rt                = "${var.service_base_name}-${var.endpoint_id}-project-rt"
+}
+
+
+resource "aws_vpc" "vpc_create" {
+  cidr_block           = var.vpc_cidr
+  count                = var.vpc_id == "" ? 1 : 0
+  instance_tenancy     = "default"
+  enable_dns_hostnames = true
+  enable_dns_support   = true
+  tags                 = {
+    Name                              = local.endpoint_vpc_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_vpc_name}"
+    "${var.service_base_name}-tag"    = local.endpoint_vpc_name
+  }
+}
+
+data "aws_vpc" "data_vpc" {
+  id = var.vpc_id == "" ? aws_vpc.vpc_create.0.id : var.vpc_id
+}
+
+resource "aws_internet_gateway" "gw" {
+  vpc_id = aws_vpc.vpc_create.0.id
+  count  = var.vpc_id == "" ? 1 : 0
+  tags   = {
+    Name                           = local.endpoint_igw_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_igw_name}"
+    "${var.service_base_name}-tag" = local.endpoint_igw_name
+  }
+}
+
+resource "aws_subnet" "endpoint_subnet" {
+  vpc_id            = aws_vpc.vpc_create.0.id
+  cidr_block        = var.subnet_cidr
+  availability_zone = "${var.region}${var.zone}"
+  tags              = {
+    Name                           = local.endpoint_subnet_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_subnet_name}"
+    "${var.service_base_name}-tag" = local.endpoint_subnet_name
+  }
+  count = var.vpc_id == "" ? 1 : 0
+}
+
+data "aws_subnet" "data_subnet" {
+  id = var.subnet_id == "" ? aws_subnet.endpoint_subnet.0.id : var.subnet_id
+}
+
+resource "aws_route" "route" {
+  count                     = var.vpc_id == "" ? 1 : 0
+  route_table_id            = aws_vpc.vpc_create.0.main_route_table_id
+  destination_cidr_block    = "0.0.0.0/0"
+  gateway_id                = aws_internet_gateway.gw.0.id
+}
+
+resource "aws_security_group" "endpoint_sec_group" {
+  name        = local.endpoint_sg_name
+  vpc_id      = data.aws_vpc.data_vpc.id
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port   = 8084
+    to_port     = 8084
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port   = 8085
+    to_port     = 8085
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port   = 4822
+    to_port     = 4822
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port   = 8088
+    to_port     = 8088
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = {
+    Name                           = local.endpoint_sg_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_sg_name}"
+    "${var.service_base_name}-tag" = local.endpoint_sg_name
+  }
+}
+
+resource "aws_eip" "endpoint_eip" {
+  vpc      = true
+  tags = {
+    Name                           = local.endpoint_ip_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_ip_name}"
+    "${var.service_base_name}-tag" = local.endpoint_ip_name
+  }
+}
+
+resource "aws_route_table" "projects_route_table" {
+  vpc_id = data.aws_vpc.data_vpc.id
+  tags   = {
+    Name                           = local.projects_rt
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.projects_rt}"
+    "${var.service_base_name}-tag" = local.projects_rt
+    "${var.service_base_name}-tag" = var.service_base_name
+  }
+}
+
+resource "aws_vpc_endpoint" "s3-endpoint" {
+  vpc_id          = data.aws_vpc.data_vpc.id
+  service_name    = "com.amazonaws.${var.region}.s3"
+  route_table_ids = [aws_route_table.projects_route_table.id]
+}
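After an apply, the S3 gateway endpoint and its route-table association can be verified from the CLI; the vpc-id and region below are hypothetical:

    # Lists the com.amazonaws.<region>.s3 endpoint attached to the projects route table
    aws ec2 describe-vpc-endpoints \
      --filters Name=vpc-id,Values=vpc-0123456789abcdef0 Name=service-name,Values=com.amazonaws.us-east-1.s3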
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/endpoint/main/outputs.tf
similarity index 75%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/endpoint/main/outputs.tf
index 16da950..4586fd2 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/outputs.tf
@@ -19,19 +19,18 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+output "endpoint_eip_address" {
+  value = aws_eip.endpoint_eip.public_ip
+}
 
+output "subnet_id" {
+  value = data.aws_subnet.data_subnet.id
+}
 
-USER root
+output "vpc_id" {
+  value = data.aws_vpc.data_vpc.id
+}
 
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+output "ssn_k8s_sg_id" {
+  value = aws_security_group.endpoint_sec_group.id
+}
\ No newline at end of file
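These outputs feed the later provisioning steps; once applied they can be read back directly:

    # Elastic IP the endpoint is associated with when network_type is public
    terraform output endpoint_eip_address
    terraform output vpc_id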
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/variables.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/variables.tf
new file mode 100644
index 0000000..798ddb8
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/variables.tf
@@ -0,0 +1,103 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "service_base_name" {}
+
+variable "access_key_id" {
+  default = ""
+}
+variable "secret_access_key" {
+  default = ""
+}
+
+variable "region" {}
+
+variable "zone" {}
+
+variable "product" {}
+
+variable "subnet_cidr" {}
+
+variable "endpoint_instance_shape" {}
+
+variable "key_name" {}
+
+variable "ami" {
+  default = "ami-07b4f3c02c7f83d59"
+}
+
+variable "vpc_id" {
+  default = ""
+}
+
+variable "subnet_id" {
+  default = ""
+}
+
+variable "network_type" {}
+
+variable "vpc_cidr" {}
+
+variable "endpoint_volume_size" {}
+
+variable "endpoint_id" {}
+
+variable "ssn_k8s_sg_id" {
+  default = ""
+}
+
+variable "ldap_host" {}
+
+variable "ldap_dn" {}
+
+variable "ldap_user" {}
+
+variable "ldap_bind_creds" {}
+
+variable "ldap_users_group" {}
+
+variable "additional_tag" {
+  default = "product:dlab"
+}
+
+variable "tag_resource_id" {
+  default = "user:tag"
+}
+
+variable "billing_enable" {}
+
+variable "mongo_password" {}
+
+variable "mongo_host" {}
+
+variable "billing_bucket" {}
+
+variable "report_path" {
+  default = ""
+}
+
+variable "aws_job_enabled" {
+  default = "false"
+}
+
+variable "billing_aws_account_id" {}
+
+variable "billing_tag" {}
diff --git a/infrastructure-provisioning/terraform/aws/project/main/iam.tf b/infrastructure-provisioning/terraform/aws/project/main/iam.tf
new file mode 100644
index 0000000..5aa83e2
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/project/main/iam.tf
@@ -0,0 +1,108 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  edge_role_name    = "${var.service_base_name}-edge-role"
+  edge_role_profile = "${var.service_base_name}-edge-profile"
+  edge_policy_name  = "${var.service_base_name}-edge-policy"
+  nb_role_name      = "${var.service_base_name}-nb-de-Role"
+  nb_role_profile   = "${var.service_base_name}-nb-Profile"
+  nb_policy_name    = "${var.service_base_name}-strict_to_S3-Policy"
+}
+
+data "template_file" "edge_policy" {
+  template = file("./files/edge-policy.json")
+}
+
+data "template_file" "nb_policy" {
+  template = file("./files/nb-policy.json")
+  vars = {
+    sbn = var.service_base_name
+  }
+}
+
+#################
+### Edge node ###
+#################
+
+resource "aws_iam_role" "edge_role" {
+  name               = local.edge_role_name
+  assume_role_policy = file("./files/edge-assume-policy.json")
+  tags = {
+    Name = "${local.edge_role_name}"
+    "${local.additional_tag[0]}" = local.additional_tag[1]
+    "${var.tag_resource_id}" = "${var.service_base_name}:${local.edge_role_name}"
+    "${var.service_base_name}-tag" = local.edge_role_name
+  }
+}
+
+resource "aws_iam_instance_profile" "edge_profile" {
+  name = local.edge_role_profile
+  role = aws_iam_role.edge_role.name
+}
+
+resource "aws_iam_policy" "edge_policy" {
+  name   = local.edge_policy_name
+  policy = data.template_file.edge_policy.rendered
+}
+
+resource "aws_iam_role_policy_attachment" "edge_policy_attach" {
+  role       = aws_iam_role.edge_role.name
+  policy_arn = aws_iam_policy.edge_policy.arn
+}
+
+############################################################
+### Exploratory environment and computational resources ###
+############################################################
+
+resource "aws_iam_role" "nb_de_role" {
+  name               = local.nb_role_name
+  assume_role_policy = file("./files/nb-assume-policy.json")
+
+  tags = {
+    Name                           = local.nb_role_name
+    Environment_tag                = var.service_base_name
+    "${var.service_base_name}-tag" = local.nb_role_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    Project_name                   = var.project_name
+    Project_tag                    = var.project_tag
+    Endpoint_tag                   = var.endpoint_tag
+    "user:tag"                     = "${var.service_base_name}:${local.nb_role_name}"
+    User_tag                       = var.user_tag
+    Custom_tag                     = var.custom_tag
+  }
+}
+
+resource "aws_iam_instance_profile" "nb_profile" {
+  name = local.nb_role_profile
+  role = aws_iam_role.nb_de_role.name
+}
+
+resource "aws_iam_policy" "nb_policy" {
+  name = local.nb_policy_name
+  description = "Strict Bucket only policy"
+  policy = data.template_file.nb_policy.rendered
+}
+
+resource "aws_iam_role_policy_attachment" "nb_policy-attach" {
+  role       = aws_iam_role.nb_de_role.name
+  policy_arn = aws_iam_policy.nb_policy.arn
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/project/main/instance.tf b/infrastructure-provisioning/terraform/aws/project/main/instance.tf
new file mode 100644
index 0000000..1220743
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/project/main/instance.tf
@@ -0,0 +1,50 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  edge_instance_name = "${var.service_base_name}-edge"
+}
+
+resource "aws_instance" "edge" {
+  ami                  = var.ami
+  instance_type        = var.instance_type
+  key_name             = var.key_name
+  subnet_id            = var.subnet_id
+  security_groups      = [aws_security_group.edge_sg.id]
+  iam_instance_profile = aws_iam_instance_profile.edge_profile.id
+  root_block_device {
+    volume_type           = "gp2"
+    volume_size           = var.edge_volume_size
+    delete_on_termination = true
+  }
+  tags = {
+    Name                           = local.edge_instance_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.edge_instance_name}"
+    "${var.service_base_name}-tag" = local.edge_instance_name
+    "Endpoint_tag"                 = var.endpoint_tag
+  }
+}
+
+resource "aws_eip_association" "edge_ip_assoc" {
+  instance_id   = aws_instance.edge.id
+  allocation_id = aws_eip.edge_ip.id
+}
diff --git a/infrastructure-provisioning/terraform/aws/project/main/network.tf b/infrastructure-provisioning/terraform/aws/project/main/network.tf
new file mode 100644
index 0000000..aac8339
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/project/main/network.tf
@@ -0,0 +1,275 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  edge_sg_name     = "${var.service_base_name}-${var.project_name}-edge-sg"
+  edge_ip_name     = "${var.service_base_name}-${var.project_name}-edge-EIP"
+  additional_tag   = split(":", var.additional_tag)
+  nb_subnet_name   = "${var.service_base_name}-${var.project_name}-nb-subnet"
+  sg_name          = "${var.service_base_name}-${var.project_name}-nb-sg" #sg - security group
+  sbn              = var.service_base_name
+}
+
+#################
+### Edge node ###
+#################
+
+resource "aws_eip" "edge_ip" {
+  vpc  = true
+  tags = {
+    Name                           = local.edge_ip_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.edge_ip_name}"
+    "${var.service_base_name}-tag" = local.edge_ip_name
+  }
+}
+
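+# Security group for the edge node. Port 3128 is conventionally a Squid
+# proxy port; the edge node presumably fronts the notebook subnet as an
+# HTTP proxy (an assumption, not stated in this file).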
+resource "aws_security_group" "edge_sg" {
+  name   = local.edge_sg_name
+  vpc_id = var.vpc_id
+
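+  # Allow all traffic originating from the notebook and edge subnets.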
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = [var.nb_cidr, var.edge_cidr]
+  }
+
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port   = 8080
+    to_port     = 8080
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port   = 80
+    to_port     = 80
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port   = 3128
+    to_port     = 3128
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 80
+    to_port     = 80
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
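+  # Egress to the notebook subnet for the web UIs of the provisioned tools.
+  # The mappings below are the tools' usual defaults and are an assumption,
+  # not documented in this file: 6006 TensorBoard, 8088 YARN ResourceManager,
+  # 18080 Spark History Server, 50070 HDFS NameNode UI, 8888 Jupyter,
+  # 8042 YARN NodeManager, 20888 EMR application proxy, 8787 RStudio.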
+  egress {
+    from_port   = 8080
+    to_port     = 8080
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 6006
+    to_port     = 6006
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 8085
+    to_port     = 8085
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 18080
+    to_port     = 18080
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 8088
+    to_port     = 8088
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
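+  # 4040-4140 is a range rather than a single port: each concurrently running
+  # Spark application binds its UI to the next free port starting at 4040.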
+  egress {
+    from_port   = 4040
+    to_port     = 4140
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 50070
+    to_port     = 50070
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 8888
+    to_port     = 8888
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 8042
+    to_port     = 8042
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 20888
+    to_port     = 20888
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 8787
+    to_port     = 8787
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 8081
+    to_port     = 8081
+    protocol    = "tcp"
+    cidr_blocks = [var.nb_cidr]
+  }
+
+  egress {
+    from_port   = 53
+    to_port     = 53
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 389
+    to_port     = 389
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 123
+    to_port     = 123
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 443
+    to_port     = 443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = {
+    Name                           = local.edge_sg_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.edge_sg_name}"
+    "${var.service_base_name}-tag" = local.edge_sg_name
+  }
+}
+
+############################################################
+### Exploratory environment and computational resources ###
+############################################################
+
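+# Private subnet that hosts the project's notebook and cluster instances.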
+resource "aws_subnet" "private_subnet" {
+  vpc_id     = var.vpc_id
+  cidr_block = var.nb_cidr
+
+  tags = {
+    Name                         = local.nb_subnet_name
+    "${local.sbn}-tag"           = local.nb_subnet_name
+    "${local.additional_tag[0]}" = local.additional_tag[1]
+    Project_name                 = var.project_name
+    Project_tag                  = var.project_tag
+    Endpoint_tag                 = var.endpoint_tag
+    "user:tag"                   = "${local.sbn}:${local.nb_subnet_name}"
+    User_tag                     = var.user_tag
+    Custom_tag                   = var.custom_tag
+  }
+}
+
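+# Security group shared by notebook and computational instances: unrestricted
+# traffic inside the project's subnets, unrestricted egress to the Internet.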
+resource "aws_security_group" "nb-sg" {
+  name   = local.sg_name
+  vpc_id = var.vpc_id
+
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = [var.nb_cidr, var.edge_cidr]
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
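+  # Note: this explicit 443 rule is already covered by the allow-all egress
+  # above; presumably it is kept to document the HTTPS dependency explicitly.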
+  egress {
+    from_port   = 443
+    to_port     = 443
+    protocol    = "TCP"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = {
+    Name                         = local.sg_name
+    "${local.sbn}-tag"           = local.sg_name
+    "${local.additional_tag[0]}" = local.additional_tag[1]
+    Project_name                 = var.project_name
+    Project_tag                  = var.project_tag
+    Endpoint_tag                 = var.endpoint_tag
+    "user:tag"                   = "${local.sbn}:${local.sg_name}"
+    User_tag                     = var.user_tag
+    Custom_tag                   = var.custom_tag
+  }
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/Chart.yaml
index 16da950..039e6d0 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: cert-manager-crd
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..b5ada58
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cert-manager-crd.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cert-manager-crd.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cert-manager-crd.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "cert-manager-crd.labels" -}}
+app.kubernetes.io/name: {{ include "cert-manager-crd.name" . }}
+helm.sh/chart: {{ include "cert-manager-crd.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/crd.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/crd.yaml
new file mode 100644
index 0000000..c2d6a4c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/templates/crd.yaml
@@ -0,0 +1,1449 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
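+# These CustomResourceDefinitions appear to be vendored from an upstream
+# cert-manager release that still used the legacy certmanager.k8s.io API
+# group; apiextensions.k8s.io/v1beta1 itself is deprecated and was removed
+# in Kubernetes 1.22, so newer clusters need the v1 CRD manifests instead.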
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: certificates.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.conditions[?(@.type=="Ready")].status
+    name: Ready
+    type: string
+  - JSONPath: .spec.secretName
+    name: Secret
+    type: string
+  - JSONPath: .spec.issuerRef.name
+    name: Issuer
+    priority: 1
+    type: string
+  - JSONPath: .status.conditions[?(@.type=="Ready")].message
+    name: Status
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: Certificate
+    plural: certificates
+    shortNames:
+    - cert
+    - certs
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            acme:
+              description: ACME contains configuration specific to ACME Certificates.
+                Notably, this contains details on how the domain names listed on this
+                Certificate resource should be 'solved', i.e. mapping HTTP01 and DNS01
+                providers to DNS names.
+              properties:
+                config:
+                  items:
+                    properties:
+                      domains:
+                        description: Domains is the list of domains that this SolverConfig
+                          applies to.
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - domains
+                    type: object
+                  type: array
+              required:
+              - config
+              type: object
+            commonName:
+              description: CommonName is a common name to be used on the Certificate.
+                If no CommonName is given, then the first entry in DNSNames is used
+                as the CommonName. The CommonName should have a length of 64 characters
+                or fewer to avoid generating invalid CSRs; in order to have longer
+                domain names, set the CommonName (or first DNSNames entry) to have
+                64 characters or fewer, and then add the longer domain name to DNSNames.
+              type: string
+            dnsNames:
+              description: DNSNames is a list of subject alt names to be used on the
+                Certificate. If no CommonName is given, then the first entry in DNSNames
+                is used as the CommonName and must have a length of 64 characters
+                or fewer.
+              items:
+                type: string
+              type: array
+            duration:
+              description: Certificate default Duration
+              type: string
+            ipAddresses:
+              description: IPAddresses is a list of IP addresses to be used on the
+                Certificate
+              items:
+                type: string
+              type: array
+            isCA:
+              description: IsCA will mark this Certificate as valid for signing. This
+                implies that the 'signing' usage is set
+              type: boolean
+            issuerRef:
+              description: IssuerRef is a reference to the issuer for this certificate.
+                If the 'kind' field is not set, or set to 'Issuer', an Issuer resource
+                with the given name in the same namespace as the Certificate will
+                be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer
+                with the provided name will be used. The 'name' field in this stanza
+                is required at all times.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+            keyAlgorithm:
+              description: KeyAlgorithm is the private key algorithm of the corresponding
+                private key for this certificate. If provided, allowed values are
+                either "rsa" or "ecdsa" If KeyAlgorithm is specified and KeySize is
+                not provided, key size of 256 will be used for "ecdsa" key algorithm
+                and key size of 2048 will be used for "rsa" key algorithm.
+              enum:
+              - rsa
+              - ecdsa
+              type: string
+            keyEncoding:
+              description: KeyEncoding is the private key cryptography standards (PKCS)
+                for this certificate's private key to be encoded in. If provided,
+                allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8,
+                respectively. If KeyEncoding is not specified, then PKCS#1 will be
+                used by default.
+              type: string
+            keySize:
+              description: KeySize is the key bit size of the corresponding private
+                key for this certificate. If provided, value must be between 2048
+                and 8192 inclusive when KeyAlgorithm is empty or is set to "rsa",
+                and value must be one of (256, 384, 521) when KeyAlgorithm is set
+                to "ecdsa".
+              format: int64
+              type: integer
+            organization:
+              description: Organization is the organization to be used on the Certificate
+              items:
+                type: string
+              type: array
+            renewBefore:
+              description: Certificate renew before expiration duration
+              type: string
+            secretName:
+              description: SecretName is the name of the secret resource to store
+                this secret in
+              type: string
+          required:
+          - secretName
+          - issuerRef
+          type: object
+        status:
+          properties:
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+            lastFailureTime:
+              format: date-time
+              type: string
+            notAfter:
+              description: The expiration time of the certificate stored in the secret
+                named by this resource in spec.secretName.
+              format: date-time
+              type: string
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: certificaterequests.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.conditions[?(@.type=="Ready")].status
+    name: Ready
+    type: string
+  - JSONPath: .spec.issuerRef.name
+    name: Issuer
+    priority: 1
+    type: string
+  - JSONPath: .status.conditions[?(@.type=="Ready")].message
+    name: Status
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: CertificateRequest
+    plural: certificaterequests
+    shortNames:
+    - cr
+    - crs
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            csr:
+              description: Byte slice containing the PEM encoded CertificateSigningRequest
+              format: byte
+              type: string
+            duration:
+              description: Requested certificate default Duration
+              type: string
+            isCA:
+              description: IsCA will mark the resulting certificate as valid for signing.
+                This implies that the 'signing' usage is set
+              type: boolean
+            issuerRef:
+              description: IssuerRef is a reference to the issuer for this CertificateRequest.  If
+                the 'kind' field is not set, or set to 'Issuer', an Issuer resource
+                with the given name in the same namespace as the CertificateRequest
+                will be used.  If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer
+                with the provided name will be used. The 'name' field in this stanza
+                is required at all times. The group field refers to the API group
+                of the issuer which defaults to 'certmanager.k8s.io' if empty.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+          required:
+          - issuerRef
+          type: object
+        status:
+          properties:
+            ca:
+              description: Byte slice containing the PEM encoded certificate authority
+                of the signed certificate.
+              format: byte
+              type: string
+            certificate:
+              description: Byte slice containing a PEM encoded signed certificate
+                resulting from the given certificate signing request.
+              format: byte
+              type: string
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: challenges.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.state
+    name: State
+    type: string
+  - JSONPath: .spec.dnsName
+    name: Domain
+    type: string
+  - JSONPath: .status.reason
+    name: Reason
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: Challenge
+    plural: challenges
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            authzURL:
+              description: AuthzURL is the URL to the ACME Authorization resource
+                that this challenge is a part of.
+              type: string
+            config:
+              description: 'Config specifies the solver configuration for this challenge.
+                Only **one** of ''config'' or ''solver'' may be specified, and if
+                both are specified then no action will be performed on the Challenge
+                resource. DEPRECATED: the ''solver'' field should be specified instead'
+              type: object
+            dnsName:
+              description: DNSName is the identifier that this challenge is for, e.g.
+                example.com.
+              type: string
+            issuerRef:
+              description: IssuerRef references a properly configured ACME-type Issuer
+                which should be used to create this Challenge. If the Issuer does
+                not exist, processing will be retried. If the Issuer is not an 'ACME'
+                Issuer, an error will be returned and the Challenge will be marked
+                as failed.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+            key:
+              description: Key is the ACME challenge key for this challenge
+              type: string
+            solver:
+              description: Solver contains the domain solving configuration that should
+                be used to solve this challenge resource. Only **one** of 'config'
+                or 'solver' may be specified, and if both are specified then no action
+                will be performed on the Challenge resource.
+              properties:
+                selector:
+                  description: Selector selects a set of DNSNames on the Certificate
+                    resource that should be solved using this challenge solver.
+                  properties:
+                    dnsNames:
+                      description: List of DNSNames that this solver will be used
+                        to solve. If specified and a match is found, a dnsNames selector
+                        will take precedence over a dnsZones selector. If multiple
+                        solvers match with the same dnsNames value, the solver with
+                        the most matching labels in matchLabels will be selected.
+                        If neither has more matches, the solver defined earlier in
+                        the list will be selected.
+                      items:
+                        type: string
+                      type: array
+                    dnsZones:
+                      description: List of DNSZones that this solver will be used
+                        to solve. The most specific DNS zone match specified here
+                        will take precedence over other DNS zone matches, so a solver
+                        specifying sys.example.com will be selected over one specifying
+                        example.com for the domain www.sys.example.com. If multiple
+                        solvers match with the same dnsZones value, the solver with
+                        the most matching labels in matchLabels will be selected.
+                        If neither has more matches, the solver defined earlier in
+                        the list will be selected.
+                      items:
+                        type: string
+                      type: array
+                    matchLabels:
+                      description: A label selector that is used to refine the set
+                        of certificates that this challenge solver will apply to.
+                      type: object
+                  type: object
+              type: object
+            token:
+              description: Token is the ACME challenge token for this challenge.
+              type: string
+            type:
+              description: Type is the type of ACME challenge this resource represents,
+                e.g. "dns01" or "http01"
+              type: string
+            url:
+              description: URL is the URL of the ACME Challenge resource for this
+                challenge. This can be used to lookup details about the status of
+                this challenge.
+              type: string
+            wildcard:
+              description: Wildcard will be true if this challenge is for a wildcard
+                identifier, for example '*.example.com'
+              type: boolean
+          required:
+          - authzURL
+          - type
+          - url
+          - dnsName
+          - token
+          - key
+          - wildcard
+          - issuerRef
+          type: object
+        status:
+          properties:
+            presented:
+              description: Presented will be set to true if the challenge values for
+                this challenge are currently 'presented'. This *does not* imply the
+                self check is passing. Only that the values have been 'submitted'
+                for the appropriate challenge mechanism (i.e. the DNS01 TXT record
+                has been presented, or the HTTP01 configuration has been configured).
+              type: boolean
+            processing:
+              description: Processing is used to denote whether this challenge should
+                be processed or not. This field will only be set to true by the 'scheduling'
+                component. It will only be set to false by the 'challenges' controller,
+                after the challenge has reached a final state or timed out. If this
+                field is set to false, the challenge controller will not take any
+                more action.
+              type: boolean
+            reason:
+              description: Reason contains human readable information on why the Challenge
+                is in the current state.
+              type: string
+            state:
+              description: State contains the current 'state' of the challenge. If
+                not set, the state of the challenge is unknown.
+              enum:
+              - ""
+              - valid
+              - ready
+              - pending
+              - processing
+              - invalid
+              - expired
+              - errored
+              type: string
+          required:
+          - processing
+          - presented
+          - reason
+          type: object
+      required:
+      - metadata
+      - spec
+      - status
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: clusterissuers.certmanager.k8s.io
+spec:
+  group: certmanager.k8s.io
+  names:
+    kind: ClusterIssuer
+    plural: clusterissuers
+  scope: Cluster
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            acme:
+              properties:
+                email:
+                  description: Email is the email for this account
+                  type: string
+                privateKeySecretRef:
+                  description: PrivateKey is the name of a secret containing the private
+                    key for this user account.
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must be a
+                        valid secret key.
+                      type: string
+                    name:
+                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                        TODO: Add other useful fields. apiVersion, kind, uid?'
+                      type: string
+                  required:
+                  - name
+                  type: object
+                server:
+                  description: Server is the ACME server URL
+                  type: string
+                skipTLSVerify:
+                  description: If true, skip verifying the ACME server TLS certificate
+                  type: boolean
+                solvers:
+                  description: Solvers is a list of challenge solvers that will be
+                    used to solve ACME challenges for the matching domains.
+                  items:
+                    properties:
+                      selector:
+                        description: Selector selects a set of DNSNames on the Certificate
+                          resource that should be solved using this challenge solver.
+                        properties:
+                          dnsNames:
+                            description: List of DNSNames that this solver will be
+                              used to solve. If specified and a match is found, a
+                              dnsNames selector will take precedence over a dnsZones
+                              selector. If multiple solvers match with the same dnsNames
+                              value, the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          dnsZones:
+                            description: List of DNSZones that this solver will be
+                              used to solve. The most specific DNS zone match specified
+                              here will take precedence over other DNS zone matches,
+                              so a solver specifying sys.example.com will be selected
+                              over one specifying example.com for the domain www.sys.example.com.
+                              If multiple solvers match with the same dnsZones value,
+                              the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          matchLabels:
+                            description: A label selector that is used to refine the
+                              set of certificates that this challenge solver will
+                              apply to.
+                            type: object
+                        type: object
+                    type: object
+                  type: array
+              required:
+              - server
+              - privateKeySecretRef
+              type: object
+            ca:
+              properties:
+                secretName:
+                  description: SecretName is the name of the secret used to sign Certificates
+                    issued by this Issuer.
+                  type: string
+              required:
+              - secretName
+              type: object
+            selfSigned:
+              type: object
+            vault:
+              properties:
+                auth:
+                  description: Vault authentication
+                  properties:
+                    appRole:
+                      description: This Secret contains an AppRole and Secret
+                      properties:
+                        path:
+                          description: Where the authentication path is mounted in
+                            Vault.
+                          type: string
+                        roleId:
+                          type: string
+                        secretRef:
+                          properties:
+                            key:
+                              description: The key of the secret to select from. Must
+                                be a valid secret key.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                          required:
+                          - name
+                          type: object
+                      required:
+                      - path
+                      - roleId
+                      - secretRef
+                      type: object
+                    tokenSecretRef:
+                      description: This Secret contains the Vault token key
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                  type: object
+                caBundle:
+                  description: Base64 encoded CA bundle to validate Vault server certificate.
+                    Only used if the Server URL is using HTTPS protocol. This parameter
+                    is ignored for plain HTTP protocol connection. If not set the
+                    system root certificates are used to validate the TLS connection.
+                  format: byte
+                  type: string
+                path:
+                  description: Vault URL path to the certificate role
+                  type: string
+                server:
+                  description: Server is the vault connection address
+                  type: string
+              required:
+              - auth
+              - server
+              - path
+              type: object
+            venafi:
+              properties:
+                cloud:
+                  description: Cloud specifies the Venafi cloud configuration settings.
+                    Only one of TPP or Cloud may be specified.
+                  properties:
+                    apiTokenSecretRef:
+                      description: APITokenSecretRef is a secret key selector for
+                        the Venafi Cloud API token.
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for Venafi Cloud
+                      type: string
+                  required:
+                  - url
+                  - apiTokenSecretRef
+                  type: object
+                tpp:
+                  description: TPP specifies Trust Protection Platform configuration
+                    settings. Only one of TPP or Cloud may be specified.
+                  properties:
+                    caBundle:
+                      description: CABundle is a PEM encoded TLS certificate to use
+                        to verify connections to the TPP instance. If specified, system
+                        roots will not be used and the issuing CA for the TPP instance
+                        must be verifiable using the provided root. If not specified,
+                        the connection will be verified using the cert-manager system
+                        root certificates.
+                      format: byte
+                      type: string
+                    credentialsRef:
+                      description: CredentialsRef is a reference to a Secret containing
+                        the username and password for the TPP server. The secret must
+                        contain two keys, 'username' and 'password'.
+                      properties:
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for the Venafi TPP instance
+                      type: string
+                  required:
+                  - url
+                  - credentialsRef
+                  type: object
+                zone:
+                  description: Zone is the Venafi Policy Zone to use for this issuer.
+                    All requests made to the Venafi platform will be restricted by
+                    the named zone policy. This field is required.
+                  type: string
+              required:
+              - zone
+              type: object
+          type: object
+        status:
+          properties:
+            acme:
+              properties:
+                lastRegisteredEmail:
+                  description: LastRegisteredEmail is the email associated with the
+                    latest registered ACME account, in order to track changes made
+                    to registered account associated with the  Issuer
+                  type: string
+                uri:
+                  description: URI is the unique account identifier, which can also
+                    be used to retrieve account details from the CA
+                  type: string
+              type: object
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: issuers.certmanager.k8s.io
+spec:
+  group: certmanager.k8s.io
+  names:
+    kind: Issuer
+    plural: issuers
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            acme:
+              properties:
+                email:
+                  description: Email is the email for this account
+                  type: string
+                privateKeySecretRef:
+                  description: PrivateKey is the name of a secret containing the private
+                    key for this user account.
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must be a
+                        valid secret key.
+                      type: string
+                    name:
+                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                        TODO: Add other useful fields. apiVersion, kind, uid?'
+                      type: string
+                  required:
+                  - name
+                  type: object
+                server:
+                  description: Server is the ACME server URL
+                  type: string
+                skipTLSVerify:
+                  description: If true, skip verifying the ACME server TLS certificate
+                  type: boolean
+                solvers:
+                  description: Solvers is a list of challenge solvers that will be
+                    used to solve ACME challenges for the matching domains.
+                  items:
+                    properties:
+                      selector:
+                        description: Selector selects a set of DNSNames on the Certificate
+                          resource that should be solved using this challenge solver.
+                        properties:
+                          dnsNames:
+                            description: List of DNSNames that this solver will be
+                              used to solve. If specified and a match is found, a
+                              dnsNames selector will take precedence over a dnsZones
+                              selector. If multiple solvers match with the same dnsNames
+                              value, the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          dnsZones:
+                            description: List of DNSZones that this solver will be
+                              used to solve. The most specific DNS zone match specified
+                              here will take precedence over other DNS zone matches,
+                              so a solver specifying sys.example.com will be selected
+                              over one specifying example.com for the domain www.sys.example.com.
+                              If multiple solvers match with the same dnsZones value,
+                              the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          matchLabels:
+                            description: A label selector that is used to refine the
+                              set of certificates that this challenge solver will
+                              apply to.
+                            type: object
+                        type: object
+                    type: object
+                  type: array
+              required:
+              - server
+              - privateKeySecretRef
+              type: object
+            ca:
+              properties:
+                secretName:
+                  description: SecretName is the name of the secret used to sign Certificates
+                    issued by this Issuer.
+                  type: string
+              required:
+              - secretName
+              type: object
+            selfSigned:
+              type: object
+            vault:
+              properties:
+                auth:
+                  description: Vault authentication
+                  properties:
+                    appRole:
+                      description: This Secret contains an AppRole and Secret
+                      properties:
+                        path:
+                          description: Where the authentication path is mounted in
+                            Vault.
+                          type: string
+                        roleId:
+                          type: string
+                        secretRef:
+                          properties:
+                            key:
+                              description: The key of the secret to select from. Must
+                                be a valid secret key.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                          required:
+                          - name
+                          type: object
+                      required:
+                      - path
+                      - roleId
+                      - secretRef
+                      type: object
+                    tokenSecretRef:
+                      description: This Secret contains the Vault token key
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                  type: object
+                caBundle:
+                  description: Base64 encoded CA bundle to validate Vault server certificate.
+                    Only used if the Server URL is using HTTPS protocol. This parameter
+                    is ignored for plain HTTP protocol connection. If not set the
+                    system root certificates are used to validate the TLS connection.
+                  format: byte
+                  type: string
+                path:
+                  description: Vault URL path to the certificate role
+                  type: string
+                server:
+                  description: Server is the vault connection address
+                  type: string
+              required:
+              - auth
+              - server
+              - path
+              type: object
+            venafi:
+              properties:
+                cloud:
+                  description: Cloud specifies the Venafi cloud configuration settings.
+                    Only one of TPP or Cloud may be specified.
+                  properties:
+                    apiTokenSecretRef:
+                      description: APITokenSecretRef is a secret key selector for
+                        the Venafi Cloud API token.
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for Venafi Cloud
+                      type: string
+                  required:
+                  - url
+                  - apiTokenSecretRef
+                  type: object
+                tpp:
+                  description: TPP specifies Trust Protection Platform configuration
+                    settings. Only one of TPP or Cloud may be specified.
+                  properties:
+                    caBundle:
+                      description: CABundle is a PEM encoded TLS certificate to use
+                        to verify connections to the TPP instance. If specified, system
+                        roots will not be used and the issuing CA for the TPP instance
+                        must be verifiable using the provided root. If not specified,
+                        the connection will be verified using the cert-manager system
+                        root certificates.
+                      format: byte
+                      type: string
+                    credentialsRef:
+                      description: CredentialsRef is a reference to a Secret containing
+                        the username and password for the TPP server. The secret must
+                        contain two keys, 'username' and 'password'.
+                      properties:
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for the Venafi TPP instance
+                      type: string
+                  required:
+                  - url
+                  - credentialsRef
+                  type: object
+                zone:
+                  description: Zone is the Venafi Policy Zone to use for this issuer.
+                    All requests made to the Venafi platform will be restricted by
+                    the named zone policy. This field is required.
+                  type: string
+              required:
+              - zone
+              type: object
+          type: object
+        status:
+          properties:
+            acme:
+              properties:
+                lastRegisteredEmail:
+                  description: LastRegisteredEmail is the email associated with the
+                    latest registered ACME account, in order to track changes made
+                    to the registered account associated with the Issuer
+                  type: string
+                uri:
+                  description: URI is the unique account identifier, which can also
+                    be used to retrieve account details from the CA
+                  type: string
+              type: object
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: orders.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.state
+    name: State
+    type: string
+  - JSONPath: .spec.issuerRef.name
+    name: Issuer
+    priority: 1
+    type: string
+  - JSONPath: .status.reason
+    name: Reason
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: Order
+    plural: orders
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            commonName:
+              description: CommonName is the common name as specified on the DER encoded
+                CSR. If CommonName is not specified, the first DNSName specified will
+                be used as the CommonName. At least one of CommonName or DNSNames
+                must be set. This field must match the corresponding field on the
+                DER encoded CSR.
+              type: string
+            config:
+              description: 'Config specifies a mapping from DNS identifiers to how
+                those identifiers should be solved when performing ACME challenges.
+                A config entry must exist for each domain listed in DNSNames and CommonName.
+                Only **one** of ''config'' or ''solvers'' may be specified, and if
+                both are specified then no action will be performed on the Order resource.  This
+                field will be removed when support for solver config specified on
+                the Certificate under certificate.spec.acme has been removed. DEPRECATED:
+                this field will be removed in future. Solver configuration must instead
+                be provided on ACME Issuer resources.'
+              items:
+                properties:
+                  domains:
+                    description: Domains is the list of domains that this SolverConfig
+                      applies to.
+                    items:
+                      type: string
+                    type: array
+                required:
+                - domains
+                type: object
+              type: array
+            csr:
+              description: Certificate signing request bytes in DER encoding. This
+                will be used when finalizing the order. This field must be set on
+                the order.
+              format: byte
+              type: string
+            dnsNames:
+              description: DNSNames is a list of DNS names that should be included
+                as part of the Order validation process. If CommonName is not specified,
+                the first DNSName specified will be used as the CommonName. At least
+                one of CommonName or DNSNames must be set. This field must match
+                the corresponding field on the DER encoded CSR.
+              items:
+                type: string
+              type: array
+            issuerRef:
+              description: IssuerRef references a properly configured ACME-type Issuer
+                which should be used to create this Order. If the Issuer does not
+                exist, processing will be retried. If the Issuer is not an 'ACME'
+                Issuer, an error will be returned and the Order will be marked as
+                failed.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+          required:
+          - csr
+          - issuerRef
+          type: object
+        status:
+          properties:
+            certificate:
+              description: Certificate is a copy of the PEM encoded certificate for
+                this Order. This field will be populated after the order has been
+                successfully finalized with the ACME server, and the order has transitioned
+                to the 'valid' state.
+              format: byte
+              type: string
+            challenges:
+              description: Challenges is a list of ChallengeSpecs for Challenges that
+                must be created in order to complete this Order.
+              items:
+                properties:
+                  authzURL:
+                    description: AuthzURL is the URL to the ACME Authorization resource
+                      that this challenge is a part of.
+                    type: string
+                  config:
+                    description: 'Config specifies the solver configuration for this
+                      challenge. Only **one** of ''config'' or ''solver'' may be specified,
+                      and if both are specified then no action will be performed on
+                      the Challenge resource. DEPRECATED: the ''solver'' field should
+                      be specified instead'
+                    type: object
+                  dnsName:
+                    description: DNSName is the identifier that this challenge is
+                      for, e.g. example.com.
+                    type: string
+                  issuerRef:
+                    description: IssuerRef references a properly configured ACME-type
+                      Issuer which should be used to create this Challenge. If the
+                      Issuer does not exist, processing will be retried. If the Issuer
+                      is not an 'ACME' Issuer, an error will be returned and the Challenge
+                      will be marked as failed.
+                    properties:
+                      group:
+                        type: string
+                      kind:
+                        type: string
+                      name:
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  key:
+                    description: Key is the ACME challenge key for this challenge
+                    type: string
+                  solver:
+                    description: Solver contains the domain solving configuration
+                      that should be used to solve this challenge resource. Only **one**
+                      of 'config' or 'solver' may be specified, and if both are specified
+                      then no action will be performed on the Challenge resource.
+                    properties:
+                      selector:
+                        description: Selector selects a set of DNSNames on the Certificate
+                          resource that should be solved using this challenge solver.
+                        properties:
+                          dnsNames:
+                            description: List of DNSNames that this solver will be
+                              used to solve. If specified and a match is found, a
+                              dnsNames selector will take precedence over a dnsZones
+                              selector. If multiple solvers match with the same dnsNames
+                              value, the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          dnsZones:
+                            description: List of DNSZones that this solver will be
+                              used to solve. The most specific DNS zone match specified
+                              here will take precedence over other DNS zone matches,
+                              so a solver specifying sys.example.com will be selected
+                              over one specifying example.com for the domain www.sys.example.com.
+                              If multiple solvers match with the same dnsZones value,
+                              the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          matchLabels:
+                            description: A label selector that is used to refine the
+                              set of Certificates that this challenge solver will
+                              apply to.
+                            type: object
+                        type: object
+                    type: object
+                  token:
+                    description: Token is the ACME challenge token for this challenge.
+                    type: string
+                  type:
+                    description: Type is the type of ACME challenge this resource
+                      represents, e.g. "dns01" or "http01"
+                    type: string
+                  url:
+                    description: URL is the URL of the ACME Challenge resource for
+                      this challenge. This can be used to lookup details about the
+                      status of this challenge.
+                    type: string
+                  wildcard:
+                    description: Wildcard will be true if this challenge is for a
+                      wildcard identifier, for example '*.example.com'
+                    type: boolean
+                required:
+                - authzURL
+                - type
+                - url
+                - dnsName
+                - token
+                - key
+                - wildcard
+                - issuerRef
+                type: object
+              type: array
+            failureTime:
+              description: FailureTime stores the time that this order failed. This
+                is used to influence garbage collection and back-off.
+              format: date-time
+              type: string
+            finalizeURL:
+              description: FinalizeURL of the Order. This is used to obtain certificates
+                for this order once it has been completed.
+              type: string
+            reason:
+              description: Reason optionally provides more information about why
+                the order is in the current state.
+              type: string
+            state:
+              description: State contains the current state of this Order resource.
+                States 'success' and 'expired' are 'final'
+              enum:
+              - ""
+              - valid
+              - ready
+              - pending
+              - processing
+              - invalid
+              - expired
+              - errored
+              type: string
+            url:
+              description: URL of the Order. This will initially be empty when the
+                resource is first created. The Order controller will populate this
+                field when the Order is first processed. This field will be immutable
+                after it is initially set.
+              type: string
+          type: object
+      required:
+      - metadata
+      - spec
+      - status
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
\ No newline at end of file
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/values.yaml
similarity index 77%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/values.yaml
index d0cfc24..0c6d2cf 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager-crd-chart/values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,10 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+replicaCount: 1
+
+ingress:
+  enabled: false
+labels: {}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager.tf
new file mode 100644
index 0000000..c6b8874
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/cert-manager.tf
@@ -0,0 +1,57 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "cert_manager_values" {
+  template = file("./files/cert_manager_values.yaml")
+}
+
+resource "helm_release" "cert_manager_crd" {
+    name       = "cert_manager_crd"
+    chart      = "./cert-manager-crd-chart"
+    wait       = true
+}
+
+data "helm_repository" "jetstack" {
+  name = "jetstack"
+  url  = "https://charts.jetstack.io"
+}
+
+resource "helm_release" "cert-manager" {
+    name       = "cert-manager"
+    repository = data.helm_repository.jetstack.metadata.0.name
+    chart      = "jetstack/cert-manager"
+    namespace  = kubernetes_namespace.cert-manager-namespace.metadata[0].name
+    depends_on = [helm_release.cert_manager_crd]
+    wait       = true
+    version    = "v0.9.0"
+    values     = [
+        data.template_file.cert_manager_values.rendered
+    ]
+}
+
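+# A fixed delay after installing cert-manager; presumably this gives the
+# webhook and controller pods time to become ready before resources that
+# depend on this null_resource (such as Issuers) are applied.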
+resource "null_resource" "cert_manager_delay" {
+    provisioner "local-exec" {
+        command = "sleep 120"
+    }
+    triggers = {
+        "after" = helm_release.cert-manager.name
+    }
+}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-billing.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-billing.tf
new file mode 100644
index 0000000..b301723
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-billing.tf
@@ -0,0 +1,57 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
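+# The chart's values.yaml is a Terraform template: each entry in vars below
+# replaces the matching ${...} placeholder before the rendered result is
+# passed to helm_release.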
+data "template_file" "dlab_billing_values" {
+  template = file("./dlab-billing-chart/values.yaml")
+  vars = {
+    mongo_db_name           = var.mongo_dbname
+    mongo_user              = var.mongo_db_username
+    mongo_port              = var.mongo_service_port
+    mongo_service_name      = var.mongo_service_name
+    service_base_name       = var.service_base_name
+    tag_resource_id         = var.tag_resource_id
+    billing_bucket          = var.billing_bucket
+    billing_bucket_path     = var.billing_bucket_path
+    billing_aws_job_enabled = var.billing_aws_job_enabled
+    billing_aws_account_id  = var.billing_aws_account_id
+    billing_tag             = var.billing_tag
+    billing_dlab_id         = var.billing_dlab_id
+    billing_usage_date      = var.billing_usage_date
+    billing_product         = var.billing_product
+    billing_usage_type      = var.billing_usage_type
+    billing_usage           = var.billing_usage
+    billing_cost            = var.billing_cost
+    billing_resource_id     = var.billing_resource_id
+    billing_tags            = var.billing_tags
+  }
+}
+
+resource "helm_release" "dlab-billing" {
+  name       = "dlab-billing"
+  chart      = "./dlab-billing-chart"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  depends_on = [helm_release.mongodb, kubernetes_secret.mongo_db_password_secret]
+  wait = true
+
+  values     = [
+      data.template_file.dlab_billing_values.rendered
+  ]
+}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/cert.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/cert.yaml
new file mode 100644
index 0000000..5762e9a
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/cert.yaml
@@ -0,0 +1,64 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
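+{{- /* Two mutually exclusive paths: when custom certificates are disabled,
+a short-lived UI certificate is requested from the in-cluster step-issuer
+through cert-manager; otherwise the TLS secret is created directly from the
+user-supplied certificate, key and CA. */ -}}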
+{{- if not .Values.ui.custom_certs.enabled -}}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+  name: dlab-ui
+  namespace: {{ .Values.namespace }}
+spec:
+  # The secret name to store the signed certificate
+  secretName: {{ include "dlab-ui.fullname" . }}-tls
+  # Common Name
+  commonName: dlab-kubernetes-cluster
+  # DNS SAN
+  dnsNames:
+    - localhost
+    - {{ .Values.ui.ingress.host }}
+  # IP Address SAN
+  ipAddresses:
+    - "127.0.0.1"
+  # Duration of the certificate
+  duration: 24h
+  # Renew 8 hours before the certificate expiration
+  renewBefore: 8h
+  # The reference to the step issuer
+  issuerRef:
+    group: certmanager.step.sm
+    kind: Issuer
+    name: step-issuer
+{{- end }}
+---
+{{- if .Values.ui.custom_certs.enabled -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "dlab-ui.fullname" . }}-tls
+  namespace: {{ .Values.namespace }}
+type: kubernetes.io/tls
+data:
+  ca.crt: {{ .Values.ui.custom_certs.ca }}
+  tls.crt: {{ .Values.ui.custom_certs.crt }}
+  tls.key: {{ .Values.ui.custom_certs.key }}
+{{- end }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/configmap-ui-conf.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/configmap-ui-conf.yaml
new file mode 100644
index 0000000..abc2517
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/configmap-ui-conf.yaml
@@ -0,0 +1,235 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "dlab-ui.fullname" . }}-ui-conf
+data:
+  ssn.yml: |
+    <#assign LOG_ROOT_DIR="/var/opt/dlab/log">
+    <#assign KEYS_DIR="/root/keys">
+    <#assign KEY_STORE_PATH="/root/keys/ssn.keystore.jks">
+    <#assign KEY_STORE_PASSWORD="${SSN_KEYSTORE_PASSWORD}">
+    <#assign TRUST_STORE_PATH="/usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts">
+    <#assign TRUST_STORE_PASSWORD="changeit">
+
+    # Available options are aws, azure, gcp
+    <#assign CLOUD_TYPE="aws">
+    cloudProvider: ${CLOUD_TYPE}
+
+    #Switch on/off developer mode here
+    <#assign DEV_MODE="false">
+    devMode: ${DEV_MODE}
+
+    mongo:
+      host: {{ .Values.ui.mongo.host }}
+      port: {{ .Values.ui.mongo.port }}
+      username: {{ .Values.ui.mongo.username }}
+      password: ${MONGO_DB_PASSWORD}
+      database: {{ .Values.ui.mongo.db_name }}
+
+    selfService:
+      protocol: https
+      host: localhost
+      port: {{ .Values.ui.service.https_port }}
+      jerseyClient:
+        timeout: 3s
+        connectionTimeout: 3s
+
+    securityService:
+      protocol: https
+      host: localhost
+      port: 8090
+      jerseyClient:
+        timeout: 20s
+        connectionTimeout: 20s
+
+    provisioningService:
+      jerseyClient:
+        timeout: 3s
+        connectionTimeout: 3s
+
+    # Log out user on inactivity
+    inactiveUserTimeoutMillSec: 7200000
+
+  self-service.yml: |
+    <#include "/root/ssn.yml">
+
+    <#if CLOUD_TYPE == "aws">
+    # Minimum and maximum number of slave EMR instances that can be created
+    minEmrInstanceCount: 2
+    maxEmrInstanceCount: 14
+    # Minimum and maximum bid for slave EMR spot instances, as a percentage of the on-demand price
+    minEmrSpotInstanceBidPct: 20
+    maxEmrSpotInstanceBidPct: 90
+    </#if>
+
+    <#if CLOUD_TYPE == "gcp">
+    # Maximum length for gcp user name (due to gcp restrictions)
+    maxUserNameLength: 10
+    # Minimum and maximum number of slave Dataproc instances that could be created
+    minInstanceCount: 3
+    maxInstanceCount: 15
+    minDataprocPreemptibleCount: 0
+    gcpOuauth2AuthenticationEnabled: false
+    </#if>
+
+    # Boundaries for Spark cluster creation
+    minSparkInstanceCount: 2
+    maxSparkInstanceCount: 14
+
+    # Timeout for checking the environment status via the provisioning service
+    checkEnvStatusTimeout: 5m
+
+    # Restrict access to DLab features using roles policy
+    rolePolicyEnabled: true
+    # Default access to DLab features using roles policy
+    roleDefaultAccess: true
+
+    # Set to true to enable the billing report scheduler.
+    billingSchedulerEnabled: true
+    # Name of configuration file for billing report.
+    <#if DEV_MODE == "true">
+    billingConfFile: ${sys['user.dir']}/../billing/billing.yml
+    <#else>
+    billingConfFile: ${DLAB_CONF_DIR}/billing.yml
+    </#if>
+
+    <#if CLOUD_TYPE == "azure">
+    azureUseLdap: <LOGIN_USE_LDAP>
+    maxSessionDurabilityMilliseconds: 288000000
+    </#if>
+
+    serviceBaseName: {{ .Values.ui.service_base_name }}
+    os: {{ .Values.ui.os }}
+    server:
+      requestLog:
+        appenders:
+        - type: file
+          currentLogFilename: ${LOG_ROOT_DIR}/ssn/request-selfservice.log
+          archive: true
+          archivedLogFilenamePattern: ${LOG_ROOT_DIR}/ssn/request-selfservice-%d{yyyy-MM-dd}.log.gz
+          archivedFileCount: 10
+      rootPath: "/api"
+      applicationConnectors:
+      - type: http
+        port: {{ .Values.ui.service.http_port }}
+      - type: https
+        port: {{ .Values.ui.service.https_port }}
+        certAlias: ssn
+        validateCerts: false
+        keyStorePath: ${KEY_STORE_PATH}
+        keyStorePassword: ${KEY_STORE_PASSWORD}
+        trustStorePath: ${TRUST_STORE_PATH}
+        trustStorePassword: ${TRUST_STORE_PASSWORD}
+      adminConnectors:
+    #    - type: http
+    #      port: 8081
+      - type: https
+        port: 8444
+        certAlias: ssn
+        validateCerts: false
+        keyStorePath: ${KEY_STORE_PATH}
+        keyStorePassword: ${KEY_STORE_PASSWORD}
+        trustStorePath: ${TRUST_STORE_PATH}
+        trustStorePassword: ${TRUST_STORE_PASSWORD}
+
+    mongoMigrationEnabled: false
+
+    logging:
+      level: INFO
+      loggers:
+        com.epam: TRACE
+        com.novemberain: ERROR
+      appenders:
+      - type: console
+      - type: file
+        currentLogFilename: ${LOG_ROOT_DIR}/ssn/selfservice.log
+        archive: true
+        archivedLogFilenamePattern: ${LOG_ROOT_DIR}/ssn/selfservice-%d{yyyy-MM-dd}.log.gz
+        archivedFileCount: 10
+
+    mavenSearchService:
+      protocol: http
+      host: search.maven.org
+      port: 80
+      jerseyClient:
+        timeout: 5s
+        connectionTimeout: 5s
+
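+    # The cron expressions below are Quartz-style: seconds-first, with '?'
+    # standing for "no specific value".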
+    schedulers:
+      inactivity:
+        enabled: false
+        cron: "0 0 0/2 ? * * *"
+      startComputationalScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      stopComputationalScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      startExploratoryScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      stopExploratoryScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      terminateComputationalScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      checkQuoteScheduler:
+        enabled: true
+        cron: "0 0 * ? * * *"
+      checkUserQuoteScheduler:
+        enabled: false
+        cron: "0 0 * ? * * *"
+      checkProjectQuoteScheduler:
+        enabled: true
+        cron: "0 * * ? * * *"
+
+
+    guacamole:
+      connectionProtocol: ssh
+      serverPort: 4822
+      port: 22
+      username: dlab-user
+
+    keycloakConfiguration:
+      redirectUri: {{ .Values.ui.keycloak.redirect_uri }}
+      realm: {{ .Values.ui.keycloak.realm_name }}
+      bearer-only: true
+      auth-server-url: ${KEYCLOAK_AUTH_URL}
+      ssl-required: none
+      register-node-at-startup: true
+      register-node-period: 600
+      resource: {{ .Values.ui.keycloak.client_id }}
+      credentials:
+        secret: ${KEYCLOAK_CLIENT_SECRET}
+
+    jerseyClient:
+      minThreads: 1
+      maxThreads: 128
+      workQueueSize: 8
+      gzipEnabled: true
+      gzipEnabledForRequests: false
+      chunkedEncodingEnabled: true
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/deployment.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/deployment.yaml
new file mode 100644
index 0000000..03c469e
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/deployment.yaml
@@ -0,0 +1,107 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "dlab-ui.fullname" . }}
+  labels:
+{{ include "dlab-ui.labels" . | indent 4 }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "dlab-ui.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ include "dlab-ui.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+    spec:
+      containers:
+        - name: {{ .Chart.Name }}
+          image: "{{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag }}"
+          imagePullPolicy: {{ .Values.ui.image.pullPolicy }}
+          env:
+            - name: MONGO_DB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongo-db-password
+                  key: password
+            - name: SSN_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: ssn-keystore-password
+                  key: password
+            - name: KEYCLOAK_CLIENT_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: keycloak-client-secret
+                  key: client_secret
+            - name: KEYCLOAK_AUTH_URL
+              value: {{ .Values.ui.keycloak.auth_server_url }}
+          ports:
+            - name: http
+              containerPort: 80
+              protocol: TCP
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - name: ui-conf
+              mountPath: /root/ssn.yml
+              subPath: ssn
+              readOnly: true
+            - name: ui-conf
+              mountPath: /root/self-service.yml
+              subPath: self-service
+              readOnly: true
+            - mountPath: "/root/step-certs"
+              name: ui-tls
+              readOnly: true
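+      # Each ConfigMap key is projected as a single file ("ssn",
+      # "self-service") and mounted individually via subPath, so the rest
+      # of /root is left untouched.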
+      volumes:
+        - name: ui-conf
+          configMap:
+            name: {{ include "dlab-ui.fullname" . }}-ui-conf
+            defaultMode: 0644
+            items:
+              - key: ssn.yml
+                path: ssn
+              - key: self-service.yml
+                path: self-service
+        - name: ui-tls
+          secret:
+            secretName: {{ include "dlab-ui.fullname" . }}-tls
+
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+    {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+    {{- end }}
+    {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+    {{- end }}
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/service.yaml
similarity index 60%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/service.yaml
index 951fdd7..86c35dc 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/templates/service.yaml
@@ -1,3 +1,4 @@
+{{- /*
 # *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -18,23 +19,25 @@
 # under the License.
 #
 # ******************************************************************************
+*/ -}}
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "dlab-ui.fullname" . }}
+  labels:
+{{ include "dlab-ui.labels" . | indent 4 }}
+spec:
+  type: {{ .Values.ui.service.type }}
+  ports:
+    - port: {{ .Values.ui.service.http_port }}
+      targetPort: {{ .Values.ui.service.http_port }}
+      protocol: TCP
+      name: http
+    - port: {{ .Values.ui.service.https_port }}
+      targetPort: {{ .Values.ui.service.https_port }}
+      protocol: TCP
+      name: https
+  selector:
+    app.kubernetes.io/name: {{ include "dlab-ui.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/values.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/values.yaml
new file mode 100644
index 0000000..84206dd
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui-chart/values.yaml
@@ -0,0 +1,69 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+# Default values for dlab-ui.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+labels: {}
+namespace: ${namespace}
+
+ui:
+  service_base_name: ${service_base_name}
+  os: ${os}
+  image:
+    repository: epamdlab/ui
+    tag: '0.1-aws'
+    pullPolicy: Always
+  service:
+    type: ClusterIP
+    #  port: 58443
+    http_port: 58080
+    https_port: 58443
+  ingress:
+    enabled: true
+    host: ${ssn_k8s_alb_dns_name}
+    annotations:
+      kubernetes.io/ingress.class: nginx
+      nginx.ingress.kubernetes.io/ssl-redirect: "true"
+      nginx.ingress.kubernetes.io/proxy-body-size: "50m"
+
+    tls:
+      - secretName: dlab-ui-tls
+        hosts:
+          - ${ssn_k8s_alb_dns_name}
+  mongo:
+    host: ${mongo_service_name}
+    port: ${mongo_port}
+    username: ${mongo_user}
+    db_name: ${mongo_db_name}
+  keycloak:
+    auth_server_url: https://${ssn_k8s_alb_dns_name}/auth
+    redirect_uri: https://${ssn_k8s_alb_dns_name}/
+    realm_name: ${keycloak_realm_name}
+    client_id: ${keycloak_client_id}
+
+  custom_certs:
+    enabled: ${custom_certs_enabled}
+    crt: ${custom_certs_crt}
+    key: ${custom_certs_key}
+    ca: ${step_ca_crt}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui.tf
new file mode 100644
index 0000000..87dbf3c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/dlab-ui.tf
@@ -0,0 +1,70 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
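+# When custom certificates are enabled, the certificate and key files are
+# expected to have been staged under /tmp on the machine running Terraform;
+# they are base64-encoded into the chart values, and the UI host is taken
+# from var.custom_certs_host rather than the nginx load balancer hostname.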
+locals {
+    custom_certs_enabled = lower(var.custom_certs_enabled)
+    custom_cert_name     = local.custom_certs_enabled == "true" ? reverse(split("/", var.custom_cert_path))[0] : "None"
+    custom_key_name      = local.custom_certs_enabled == "true" ? reverse(split("/", var.custom_key_path))[0] : "None"
+    custom_cert          = local.custom_certs_enabled == "true" ? base64encode(file("/tmp/${local.custom_cert_name}")) : "None"
+    custom_key           = local.custom_certs_enabled == "true" ? base64encode(file("/tmp/${local.custom_key_name}")) : "None"
+    ui_host              = local.custom_certs_enabled == "true" ? var.custom_certs_host : data.kubernetes_service.nginx-service.load_balancer_ingress.0.hostname
+}
+
+data "template_file" "dlab_ui_values" {
+  template = file("./dlab-ui-chart/values.yaml")
+  vars = {
+      mongo_db_name          = var.mongo_dbname
+      mongo_user             = var.mongo_db_username
+      mongo_port             = var.mongo_service_port
+      mongo_service_name     = var.mongo_service_name
+      ssn_k8s_alb_dns_name   = local.ui_host
+      service_base_name      = var.service_base_name
+      os                     = var.env_os
+      namespace              = kubernetes_namespace.dlab-namespace.metadata[0].name
+      custom_certs_enabled   = local.custom_certs_enabled
+      custom_certs_crt       = local.custom_cert
+      custom_certs_key       = local.custom_key
+      step_ca_crt            = lookup(data.external.step-ca-config-values.result, "rootCa")
+      keycloak_realm_name    = var.keycloak_realm_name
+      keycloak_client_id     = var.keycloak_client_id
+  }
+}
+
+resource "helm_release" "dlab_ui" {
+    name       = "dlab-ui"
+    chart      = "./dlab-ui-chart"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    depends_on = [helm_release.mongodb, kubernetes_secret.mongo_db_password_secret, null_resource.step_ca_issuer_delay]
+    wait       = true
+
+    values     = [
+        data.template_file.dlab_ui_values.rendered
+    ]
+}
+
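+# Reads back the Service created by the nginx ingress release so that its
+# load balancer hostname can serve as the UI host when custom certificates
+# are disabled.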
+data "kubernetes_service" "nginx-service" {
+    metadata {
+        name      = "${helm_release.nginx.name}-controller"
+        namespace = kubernetes_namespace.dlab-namespace.metadata[0].name
+    }
+}
+
+
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/cert_manager_values.yaml
similarity index 76%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/cert_manager_values.yaml
index d0cfc24..688e91f 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/cert_manager_values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,7 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
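+# Enables cert-manager's (then alpha) CertificateRequest controllers, which
+# external issuers such as the step-issuer used here appear to rely on.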
+extraArgs:
+  - --feature-gates=CertificateRequestControllers=true
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/configure_keycloak.sh b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/configure_keycloak.sh
new file mode 100644
index 0000000..26662bc
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/configure_keycloak.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+      # *****************************************************************************
+      #
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements.  See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership.  The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License.  You may obtain a copy of the License at
+      #
+      #   http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing,
+      # software distributed under the License is distributed on an
+      # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+      # KIND, either express or implied.  See the License for the
+      # specific language governing permissions and limitations
+      # under the License.
+      #
+      # ******************************************************************************
+
+      # 6-space indentation is required because this file is pasted into keycloak_values.yaml by Terraform
+      set -x
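+      # auth() and check_realm() record "true"/"false" in the global RUN
+      # variable rather than using exit codes; callers poll $RUN.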
+      auth () {
+          RUN=$(/opt/jboss/keycloak/bin/kcadm.sh config credentials --server http://127.0.0.1:8080/auth --realm master \
+          --user ${keycloak_user} --password ${keycloak_password} > /dev/null && echo "true" || echo "false")
+      }
+      check_realm () {
+          RUN=$(/opt/jboss/keycloak/bin/kcadm.sh get realms/${keycloak_realm_name} > /dev/null && echo "true" || echo "false")
+      }
+      configure_keycloak () {
+          # Create Realm
+          /opt/jboss/keycloak/bin/kcadm.sh create realms -s realm=${keycloak_realm_name} -s enabled=true -s loginTheme=dlab \
+          -s sslRequired=none
+          # Get realm ID
+          dlab_realm_id=$(/opt/jboss/keycloak/bin/kcadm.sh get realms/${keycloak_realm_name} | /usr/bin/jq -r '.id')
+          # Create user federation
+          /opt/jboss/keycloak/bin/kcadm.sh create components -r ${keycloak_realm_name} -s name=dlab-ldap -s providerId=ldap \
+          -s providerType=org.keycloak.storage.UserStorageProvider -s parentId=$dlab_realm_id  -s 'config.priority=["1"]' \
+          -s 'config.fullSyncPeriod=["-1"]' -s 'config.changedSyncPeriod=["-1"]' -s 'config.cachePolicy=["DEFAULT"]' \
+          -s config.evictionDay=[] -s config.evictionHour=[] -s config.evictionMinute=[] -s config.maxLifespan=[] -s \
+          'config.batchSizeForSync=["1000"]' -s 'config.editMode=["READ_ONLY"]' -s 'config.syncRegistrations=["false"]' \
+          -s 'config.vendor=["other"]' -s 'config.usernameLDAPAttribute=["${ldap_usernameAttr}"]' \
+          -s 'config.rdnLDAPAttribute=["${ldap_rdnAttr}"]' -s 'config.uuidLDAPAttribute=["${ldap_uuidAttr}"]' \
+          -s 'config.userObjectClasses=["inetOrgPerson, organizationalPerson"]' \
+          -s 'config.connectionUrl=["ldap://${ldap_host}:389"]'  -s 'config.usersDn=["${ldap_users_group},${ldap_dn}"]' \
+          -s 'config.authType=["simple"]' -s 'config.bindDn=["${ldap_user},${ldap_dn}"]' \
+          -s 'config.bindCredential=["${ldap_bind_creds}"]' -s 'config.searchScope=["1"]' \
+          -s 'config.useTruststoreSpi=["ldapsOnly"]' -s 'config.connectionPooling=["true"]' \
+          -s 'config.pagination=["true"]' --server http://127.0.0.1:8080/auth
+          # Get user federation ID
+          user_f_id=$(/opt/jboss/keycloak/bin/kcadm.sh get components -r ${keycloak_realm_name} --query name=dlab-ldap | /usr/bin/jq -er '.[].id')
+          # Create user federation email mapper
+          /opt/jboss/keycloak/bin/kcadm.sh create components -r ${keycloak_realm_name} -s name=uid-attribute-to-email-mapper \
+          -s providerId=user-attribute-ldap-mapper -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper \
+          -s parentId=$user_f_id -s 'config."user.model.attribute"=["email"]' \
+          -s 'config."ldap.attribute"=["uid"]' -s 'config."read.only"=["false"]' \
+          -s 'config."always.read.value.from.ldap"=["false"]' -s 'config."is.mandatory.in.ldap"=["false"]'
+          # Create user federation group mapper
+          /opt/jboss/keycloak/bin/kcadm.sh create components -r ${keycloak_realm_name} -s name=group_mapper -s providerId=group-ldap-mapper \
+          -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper -s parentId=$user_f_id \
+          -s 'config."groups.dn"=["ou=Groups,${ldap_dn}"]' -s 'config."group.name.ldap.attribute"=["cn"]' \
+          -s 'config."group.object.classes"=["posixGroup"]' -s 'config."preserve.group.inheritance"=["false"]' \
+          -s 'config."membership.ldap.attribute"=["memberUid"]' -s 'config."membership.attribute.type"=["UID"]' \
+          -s 'config."groups.ldap.filter"=[]' -s 'config.mode=["IMPORT"]' \
+          -s 'config."user.roles.retrieve.strategy"=["LOAD_GROUPS_BY_MEMBER_ATTRIBUTE"]' \
+          -s 'config."mapped.group.attributes"=[]' -s 'config."drop.non.existing.groups.during.sync"=["false"]'
+          # Create client
+          /opt/jboss/keycloak/bin/kcadm.sh create clients -r ${keycloak_realm_name} -s clientId=${keycloak_client_id} -s enabled=true -s \
+          'redirectUris=["https://${ssn_k8s_alb_dns_name}/"]' -s secret=${keycloak_client_secret} -s \
+          serviceAccountsEnabled=true
+          # Get client ID
+          client_id=$(/opt/jboss/keycloak/bin/kcadm.sh get clients -r ${keycloak_realm_name} --query clientId=${keycloak_client_id} | /usr/bin/jq -er '.[].id')
+          # Create client mapper
+          /opt/jboss/keycloak/bin/kcadm.sh create clients/$client_id/protocol-mappers/models \
+          -r ${keycloak_realm_name} -s name=group_mapper -s protocol=openid-connect -s protocolMapper="oidc-group-membership-mapper" \
+          -s 'config."full.path"="false"' -s 'config."id.token.claim"="true"' -s 'config."access.token.claim"="true"' \
+          -s 'config."claim.name"="groups"' -s 'config."userinfo.token.claim"="true"'
+      }
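+      # Waits (up to 120 retries, ~10 minutes) for Keycloak to accept the
+      # admin credentials, then creates the realm only if it is missing.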
+      main_func () {
+          microdnf install jq
+          hostname=$(hostname)
+          # Authentication
+          count=0
+          if [[ $hostname != "keycloak-0" ]];
+          then
+            echo "Skipping startup script!"
+            exit 0
+          fi
+          while auth
+          do
+          if [[ $RUN == "false" ]] && (( $count < 120 ));
+          then
+              echo "Waiting for Keycloak..."
+              sleep 5
+              count=$((count + 1))
+          elif [[ $RUN == "true" ]];
+          then
+              echo "Authenticated!"
+              break
+          else
+              echo "Timeout error!"
+              exit 1
+          fi
+          done
+          # Check whether the realm already exists
+          check_realm
+          # Create the realm and its resources if they don't exist yet
+          if [[ $RUN == "false" ]];
+          then
+              configure_keycloak
+          else
+              echo "Realm is already exist!"
+          fi
+      }
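+      # Run in the background so this startup hook does not block the Keycloak
+      # container entrypoint while the auth loop waits for the server.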
+      main_func &
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/keycloak_values.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/keycloak_values.yaml
new file mode 100644
index 0000000..569e4e7
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/keycloak_values.yaml
@@ -0,0 +1,80 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+keycloak:
+  image:
+    tag: "7.0.0"
+  replicas: 1
+  basepath: auth
+  username: ${keycloak_user}
+  password: "${keycloak_password}"
+
+  persistence:
+    dbVendor: mysql
+    dbName: ${mysql_db_name}
+    dbHost: keycloak-mysql
+    dbPort: 3306
+    dbUser: ${mysql_user}
+    dbPassword: "${mysql_user_password}"
+
+  service:
+    type: ClusterIP
+    # nodePort: 31088
+
+  ingress:
+    enabled: true
+    annotations:
+      kubernetes.io/ingress.class: nginx
+      nginx.ingress.kubernetes.io/ssl-redirect: "true"
+      nginx.ingress.kubernetes.io/rewrite-target: /auth
+    path: /auth
+    hosts:
+      - ${ssn_k8s_alb_dns_name}
+    tls:
+      - hosts:
+          - ${ssn_k8s_alb_dns_name}
+        secretName: dlab-ui-tls
+
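+  # Startup hook executed inside the container; carries the Terraform-rendered
+  # configure_keycloak.sh that creates the realm, LDAP federation and client.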
+  startupScripts:
+    mystartup.sh: |
+      ${configure_keycloak_file}
+
+  extraInitContainers: |
+    - name: theme-provider
+      image: epamdlab/ui-theme:0.1
+      imagePullPolicy: Always
+      command:
+        - sh
+      args:
+        - -c
+        - |
+          echo "Copying theme..."
+          cp -R /dlab/* /theme
+      volumeMounts:
+        - name: theme
+          mountPath: /theme
+  extraVolumeMounts: |
+    - name: theme
+      mountPath: /opt/jboss/keycloak/themes/dlab
+
+  extraVolumes: |
+    - name: theme
+      emptyDir: {}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mongo_values.yaml
similarity index 73%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mongo_values.yaml
index 16da950..15208a4 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mongo_values.yaml
@@ -19,19 +19,21 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+replicaSet:
+  enabled: true
 
+mongodbRootPassword: "${mongo_root_pwd}"
+mongodbUsername: ${mongo_db_username}
+mongodbDatabase: ${mongo_dbname}
+mongodbPassword: "${mongo_db_pwd}"
 
-USER root
+image:
+  tag: ${mongo_image_tag}
 
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
+persistence:
+  enabled: false
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+service:
+  type: ClusterIP
+  port: ${mongo_service_port}
+#  nodePort: ${mongo_node_port}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mysql_keycloak_values.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mysql_keycloak_values.yaml
index 16da950..c9a82bc 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/mysql_keycloak_values.yaml
@@ -19,19 +19,12 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+mysqlRootPassword: "${mysql_root_password}"
+mysqlUser: ${mysql_user}
+mysqlPassword: "${mysql_user_password}"
+mysqlDatabase: ${mysql_db_name}
+imageTag: "5.7.14"
+persistence:
+  enabled: true
+  size: ${mysql_disk_size}Gi
+  storageClass: ${storage_class}
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/nginx_values.yaml
similarity index 78%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/nginx_values.yaml
index d0cfc24..a484a42 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/files/nginx_values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,8 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+controller:
+  service:
+    type: LoadBalancer
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/keycloak.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/keycloak.tf
new file mode 100644
index 0000000..a9ffd62
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/keycloak.tf
@@ -0,0 +1,74 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "configure_keycloak" {
+  template = file("./files/configure_keycloak.sh")
+  vars     = {
+    ssn_k8s_alb_dns_name   = local.ui_host
+    keycloak_user          = var.keycloak_user
+    keycloak_password      = random_string.keycloak_password.result
+    keycloak_client_secret = random_uuid.keycloak_client_secret.result
+    ldap_usernameAttr      = var.ldap_usernameAttr
+    ldap_rdnAttr           = var.ldap_rdnAttr
+    ldap_uuidAttr          = var.ldap_uuidAttr
+    ldap_host              = var.ldap_host
+    ldap_users_group       = var.ldap_users_group
+    ldap_dn                = var.ldap_dn
+    ldap_user              = var.ldap_user
+    ldap_bind_creds        = var.ldap_bind_creds
+    keycloak_realm_name    = var.keycloak_realm_name
+    keycloak_client_id     = var.keycloak_client_id
+  }
+}
+
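+# keycloak_values.yaml consumes the rendered script via the chart's
+# startupScripts hook, so it executes inside the keycloak-0 pod on first boot.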
+data "template_file" "keycloak_values" {
+  template = file("./files/keycloak_values.yaml")
+  vars = {
+    keycloak_user           = var.keycloak_user
+    keycloak_password       = random_string.keycloak_password.result
+    ssn_k8s_alb_dns_name    = local.ui_host
+    configure_keycloak_file = data.template_file.configure_keycloak.rendered
+    mysql_db_name           = var.mysql_keycloak_db_name
+    mysql_user              = var.mysql_keycloak_user
+    mysql_user_password     = random_string.mysql_keycloak_user_password.result
+    # replicas_count          = var.ssn_k8s_workers_count > 3 ? 3 : var.ssn_k8s_workers_count
+  }
+}
+
+data "helm_repository" "codecentric" {
+  name = "codecentric"
+  url  = "https://codecentric.github.io/helm-charts"
+}
+
+resource "helm_release" "keycloak" {
+  name       = "keycloak"
+  repository = data.helm_repository.codecentric.metadata.0.name
+  chart      = "codecentric/keycloak"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  wait       = true
+  timeout    = 600
+
+  values     = [
+    data.template_file.keycloak_values.rendered
+  ]
+  depends_on = [helm_release.keycloak-mysql, kubernetes_secret.keycloak_password_secret, helm_release.nginx,
+                helm_release.dlab_ui]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/main.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/main.tf
new file mode 100644
index 0000000..49b9fb2
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/main.tf
@@ -0,0 +1,62 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+provider "helm" {
+  install_tiller  = true
+  namespace       = "kube-system"
+  service_account = "tiller"
+  tiller_image    = "gcr.io/kubernetes-helm/tiller:v2.15.0"
+}
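+# Helm 2-style setup: the provider installs Tiller into kube-system, and every
+# helm_release below is deployed through it.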
+
+provider "kubernetes" {}
+
+resource "kubernetes_namespace" "dlab-namespace" {
+  metadata {
+    annotations = {
+      name = var.namespace_name
+    }
+
+    name = var.namespace_name
+  }
+}
+
+resource "kubernetes_namespace" "cert-manager-namespace" {
+  metadata {
+    annotations = {
+      name = "cert-manager"
+    }
+    labels = {
+      "certmanager.k8s.io/disable-validation" = "true"
+    }
+
+    name = "cert-manager"
+  }
+}
+
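+# gp2 EBS-backed storage class, consumed by the Keycloak MySQL PVC (mysql.tf).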
+resource "kubernetes_storage_class" "dlab-storage-class" {
+  metadata {
+    name = "aws-ebs"
+  }
+  storage_provisioner = "kubernetes.io/aws-ebs"
+  reclaim_policy      = "Delete"
+  parameters = {
+    type = "gp2"
+  }
+}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mongo.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mongo.tf
new file mode 100644
index 0000000..7ec345d
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mongo.tf
@@ -0,0 +1,45 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "mongo_values" {
+  template = file("./files/mongo_values.yaml")
+  vars     = {
+      mongo_root_pwd      = random_string.mongo_root_password.result
+      mongo_db_username   = var.mongo_db_username
+      mongo_dbname        = var.mongo_dbname
+      mongo_db_pwd        = random_string.mongo_db_password.result
+      mongo_image_tag     = var.mongo_image_tag
+      mongo_service_port  = var.mongo_service_port
+      mongo_node_port     = var.mongo_node_port
+  }
+}
+
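+# stable/mongodb with replicaSet enabled; persistence is disabled in
+# mongo_values.yaml, so data lives on the pod's ephemeral storage.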
+resource "helm_release" "mongodb" {
+  name       = "mongo-ha"
+  chart      = "stable/mongodb"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  wait       = true
+  values     = [
+      data.template_file.mongo_values.rendered
+  ]
+  depends_on = [helm_release.nginx, kubernetes_secret.mongo_db_password_secret,
+                kubernetes_secret.mongo_root_password_secret]
+}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mysql.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mysql.tf
new file mode 100644
index 0000000..d55903d
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/mysql.tf
@@ -0,0 +1,44 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "keycloak-mysql-values" {
+  template = file("./files/mysql_keycloak_values.yaml")
+  vars = {
+    mysql_root_password = random_string.mysql_root_password.result
+    mysql_user          = var.mysql_keycloak_user
+    mysql_user_password = random_string.mysql_keycloak_user_password.result
+    mysql_db_name       = var.mysql_keycloak_db_name
+    storage_class       = kubernetes_storage_class.dlab-storage-class.metadata[0].name
+    mysql_disk_size     = var.mysql_disk_size
+  }
+}
+
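+# Keycloak's backing database; its PVC (mysql_disk_size, in GiB) binds through
+# the aws-ebs storage class declared in main.tf.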
+resource "helm_release" "keycloak-mysql" {
+  name       = "keycloak-mysql"
+  chart      = "stable/mysql"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  wait       = true
+  values     = [
+    data.template_file.keycloak-mysql-values.rendered
+  ]
+  depends_on = [kubernetes_secret.mysql_root_password_secret, kubernetes_secret.mysql_keycloak_user_password_secret,
+                helm_release.nginx]
+}
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/nginx.tf
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/nginx.tf
index 16da950..e03a1a3 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/nginx.tf
@@ -19,19 +19,15 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+resource "helm_release" "nginx" {
+    name       = "nginx-ingress"
+    chart      = "stable/nginx-ingress"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    wait       = true
 
+    depends_on = [null_resource.step_ca_delay]
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+    values     = [
+        file("files/nginx_values.yaml")
+    ]
+}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/outputs.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/outputs.tf
new file mode 100644
index 0000000..b0cd25a
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/outputs.tf
@@ -0,0 +1,64 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+output "keycloak_client_secret" {
+    value = random_uuid.keycloak_client_secret.result
+}
+
+output "keycloak_auth_server_url" {
+    value = "https://${local.ui_host}/auth"
+}
+
+output "keycloak_realm_name" {
+    value = var.keycloak_realm_name
+}
+
+output "keycloak_user_name" {
+    value = var.keycloak_user
+}
+
+output "keycloak_user_password" {
+    value = random_string.keycloak_password.result
+}
+
+output "keycloak_client_id" {
+    value = var.keycloak_client_id
+}
+
+output "ssn_ui_host" {
+    value = local.ui_host
+}
+
+output "step_root_ca" {
+    value = lookup(data.external.step-ca-config-values.result, "rootCa")
+}
+
+output "step_kid" {
+    value = lookup(data.external.step-ca-config-values.result, "kid")
+}
+
+output "step_kid_password" {
+    value = random_string.step_ca_provisioner_password.result
+}
+
+output "step_ca_url" {
+    value = "https://${var.ssn_k8s_nlb_dns_name}:443"
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/secrets.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/secrets.tf
new file mode 100644
index 0000000..5a78c41
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/secrets.tf
@@ -0,0 +1,162 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
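+# Each credential below is generated at apply time and mirrored into a
+# Kubernetes secret in the dlab namespace for in-cluster consumers.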
+resource "random_uuid" "keycloak_client_secret" {}
+
+resource "random_string" "ssn_keystore_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "keycloak_client_secret" {
+  metadata {
+    name       = "keycloak-client-secret"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    client_secret = random_uuid.keycloak_client_secret.result
+  }
+}
+
+resource "random_string" "keycloak_password" {
+  length = 16
+  special = false
+}
+
+
+resource "kubernetes_secret" "keycloak_password_secret" {
+  metadata {
+    name       = "keycloak-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.keycloak_password.result
+  }
+}
+
+resource "random_string" "mongo_root_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mongo_root_password_secret" {
+  metadata {
+    name       = "mongo-root-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mongo_root_password.result
+  }
+}
+
+resource "random_string" "mongo_db_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mongo_db_password_secret" {
+  metadata {
+    name       = "mongo-db-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mongo_db_password.result
+  }
+}
+
+resource "random_string" "mysql_root_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mysql_root_password_secret" {
+  metadata {
+    name       = "mysql-root-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mysql_root_password.result
+  }
+}
+
+resource "random_string" "mysql_keycloak_user_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mysql_keycloak_user_password_secret" {
+  metadata {
+    name       = "mysql-keycloak-user-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mysql_keycloak_user_password.result
+  }
+}
+
+resource "kubernetes_secret" "ssn_keystore_password" {
+  metadata {
+    name       = "ssn-keystore-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.ssn_keystore_password.result
+  }
+}
+
+resource "random_string" "step_ca_password" {
+  length = 8
+  special = false
+}
+
+resource "kubernetes_secret" "step_ca_password_secret" {
+  metadata {
+    name       = "step-ca-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.step_ca_password.result
+  }
+}
+
+resource "random_string" "step_ca_provisioner_password" {
+  length = 8
+  special = false
+}
+
+resource "kubernetes_secret" "step_ca_provisioner_password_secret" {
+  metadata {
+    name       = "step-ca-provisioner-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.step_ca_provisioner_password.result
+  }
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/.helmignore
index 951fdd7..2f795d4 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/Chart.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/Chart.yaml
new file mode 100644
index 0000000..e9d93e2
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/Chart.yaml
@@ -0,0 +1,52 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+apiVersion: v1
+appVersion: 0.13.2
+description: An online certificate authority and related tools for secure automated
+  certificate management, so you can use TLS everywhere.
+engine: gotpl
+home: https://smallstep.com
+icon: https://raw.githubusercontent.com/smallstep/certificates/master/icon.png
+keywords:
+- acme
+- authority
+- ca
+- certificate
+- certificates
+- certificate-authority
+- kubernetes
+- pki
+- security
+- security-tools
+- smallstep
+- ssh
+- step
+- step-ca
+- tls
+- x509
+maintainers:
+- email: mariano@smallstep.com
+  name: Mariano Cano
+name: step-certificates
+sources:
+- https://github.com/smallstep/certificates
+version: 1.13.2
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/NOTES.txt b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/NOTES.txt
new file mode 100644
index 0000000..43f6544
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/NOTES.txt
@@ -0,0 +1,13 @@
+
+Thanks for installing Step CA.
+
+1. Get the PKI and Provisioner secrets running these commands:
+   kubectl get -n {{ .Release.Namespace }} -o jsonpath='{.data.password}' secret/{{ include "step-certificates.fullname" . }}-ca-password | base64 --decode
+   kubectl get -n {{ .Release.Namespace }} -o jsonpath='{.data.password}' secret/{{ include "step-certificates.fullname" . }}-provisioner-password | base64 --decode
+{{ if .Release.IsInstall }}
+2. Get the CA URL and the root certificate fingerprint running this command:
+   kubectl -n {{ .Release.Namespace }} logs job.batch/{{ .Release.Name }}
+
+3. Delete the configuration job running this command:
+   kubectl -n {{ .Release.Namespace }} delete job.batch/{{ .Release.Name }}
+{{ end -}}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..b65f748
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/_helpers.tpl
@@ -0,0 +1,88 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "step-certificates.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "step-certificates.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "step-certificates.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "step-certificates.labels" -}}
+helm.sh/chart: {{ include "step-certificates.chart" . }}
+app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Create CA URL
+*/}}
+{{- define "step-certificates.url" -}}
+{{- if .Values.ca.url -}}
+{{- .Values.ca.url -}}
+{{- else -}}
+{{- printf "https://%s.%s.svc.cluster.local" (include "step-certificates.fullname" .) .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create CA DNS
+*/}}
+{{- define "step-certificates.dns" -}}
+{{- if .Values.ca.dns -}}
+{{- .Values.ca.dns -}}
+{{- else -}}
+{{- printf "%s.%s.svc.cluster.local,127.0.0.1" (include "step-certificates.fullname" .) .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/bootstrap.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/bootstrap.yaml
new file mode 100644
index 0000000..354c144
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/bootstrap.yaml
@@ -0,0 +1,60 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{- if .Release.IsInstall -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+  {{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: "{{.Release.Name}}"
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+spec:
+  template:
+    metadata:
+      name: "{{.Release.Name}}"
+      labels:
+        app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+    spec:
+      serviceAccountName: {{ include "step-certificates.fullname" . }}-config
+      restartPolicy: Never
+      volumes:
+        - name: bootstrap
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-bootstrap
+      containers:
+        - name: config
+          image: "{{ .Values.bootstrapImage.repository }}:{{ .Values.bootstrapImage.tag }}"
+          imagePullPolicy: {{ .Values.bootstrapImage.pullPolicy }}
+          command: ["/bin/sh", "/home/step/bootstrap/bootstrap.sh"]
+          volumeMounts:
+            - name: bootstrap
+              mountPath: /home/step/bootstrap
+              readOnly: true
+{{- end -}}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/ca.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/ca.yaml
new file mode 100644
index 0000000..24ed08e
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/ca.yaml
@@ -0,0 +1,158 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "step-certificates.fullname" . }}
+  labels:
+  {{ include "step-certificates.labels" . | indent 4 }}
+spec:
+  # Only one replica is supported at this moment
+  # Requested {{ .Values.replicaCount }}
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+  serviceName: {{ include "step-certificates.fullname" . }}
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+    spec:
+      {{- if .Release.IsInstall }}
+      initContainers:
+        - name: {{ .Chart.Name }}-init
+          image: busybox:latest
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          command: ["sleep", "20"]
+      {{- end }}
+      securityContext:
+        {{- if .Values.ca.runAsRoot }}
+        runAsUser: 0
+        {{- else }}
+        runAsUser: 1000
+        runAsNonRoot: true
+        runAsGroup: 1000
+        fsGroup: 1000
+        allowPrivilegeEscalation: false
+        {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          command: ["/usr/local/bin/step-ca",
+                    "--password-file", "/home/step/secrets/passwords/password",
+                    "/home/step/config/ca.json"]
+          env:
+            - name: NAMESPACE
+              value: "{{ .Release.Namespace }}"
+          ports:
+            - name: https
+              containerPort: {{ .Values.service.targetPort }}
+              protocol: TCP
+          livenessProbe:
+            initialDelaySeconds: 5
+            httpGet:
+              path: /health
+              port: {{ .Values.service.targetPort }}
+              scheme: HTTPS
+          readinessProbe:
+            initialDelaySeconds: 5
+            httpGet:
+              path: /health
+              port: {{ .Values.service.targetPort }}
+              scheme: HTTPS
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - name: certs
+              mountPath: /home/step/certs
+              readOnly: true
+            - name: config
+              mountPath: /home/step/config
+              readOnly: true
+            - name: secrets
+              mountPath: /home/step/secrets
+              readOnly: true
+            - name: ca-password
+              mountPath: /home/step/secrets/passwords
+              readOnly: true
+            {{- if .Values.ca.db.enabled }}
+            - name: database
+              mountPath: /home/step/db
+              readOnly: false
+            {{- end }}
+      volumes:
+        - name: certs
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-certs
+        - name: config
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-config
+        - name: secrets
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-secrets
+        - name: ca-password
+          secret:
+            secretName: {{ include "step-certificates.fullname" . }}-ca-password
+        {{- if and .Values.ca.db.enabled (not .Values.ca.db.persistent) }}
+        - name: database
+          emptyDir: {}
+        {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- if and .Values.ca.db.enabled .Values.ca.db.persistent }}
+  volumeClaimTemplates:
+    - metadata:
+        name: database
+        labels:
+          app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+          app.kubernetes.io/instance: {{ .Release.Name }}
+          app.kubernetes.io/managed-by: {{ .Release.Service }}
+      spec:
+        accessModes:
+        {{- range .Values.ca.db.accessModes }}
+          - {{ . | quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.ca.db.size | quote }}
+        {{- if .Values.ca.db.storageClass }}
+        {{- if (eq "-" .Values.ca.db.storageClass) }}
+        storageClassName: ""
+        {{- else }}
+        storageClassName: {{ .Values.ca.db.storageClass | quote }}
+        {{- end }}
+        {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/configmaps.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/configmaps.yaml
new file mode 100644
index 0000000..1670d9a
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/configmaps.yaml
@@ -0,0 +1,167 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+# ConfigMaps that will be updated by the configuration job:
+# 1. Step CA config directory.
+# 2. Step CA certs directory.
+# 3. Step CA secrets directory.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-certs
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: v1
+data:
+  intermediate_ca_key: ""
+  root_ca_key: ""
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-secrets
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-bootstrap
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+data:
+  bootstrap.sh: |-
+    #!/bin/sh
+    STEPPATH=/home/step
+    echo -e "\e[1mWelcome to Step Certificates configuration.\e[0m\n"
+
+    function permission_error () {
+      echo -e "\033[0;31mPERMISSION ERROR:\033[0m $1\n"
+      exit 1
+    }
+
+    function kbreplace() {
+      kubectl $@ -o yaml --dry-run | kubectl replace -f -
+    }
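+    # kbreplace renders an object with --dry-run and pipes it to "kubectl
+    # replace", so re-running the job updates the placeholder configmaps and
+    # secrets created at install time instead of failing on create.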
+
+    echo -e "\e[1mConfiguring kubctl with service account...\e[0m"
+    # Use the service account context
+    kubectl config set-cluster cfc --server=https://kubernetes.default --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    kubectl config set-credentials bootstrap --token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
+    kubectl config set-context cfc --cluster=cfc --user=bootstrap
+    kubectl config use-context cfc
+
+    echo -e "\n\e[1mChecking cluster permissions...\e[0m"
+    echo -n "Checking for permission to create configmaps in {{.Release.Namespace}} namespace: "
+    kubectl auth can-i create configmaps --namespace {{.Release.Namespace}}
+    if [ $? -ne 0 ]; then
+      permission_error "create configmaps"
+    fi
+
+    echo -n "Checking for permission to create secrets in {{.Release.Namespace}} namespace: "
+    kubectl auth can-i create secrets --namespace {{.Release.Namespace}}
+    if [ $? -ne 0 ]; then
+      permission_error "create secrets"
+    fi
+    {{ if .Values.autocert.enabled }}
+    echo -n "Checking for permission to create mutatingwebhookconfiguration in {{.Release.Namespace}} namespace: "
+    kubectl auth can-i create mutatingwebhookconfiguration --namespace {{.Release.Namespace}}
+    if [ $? -ne 0 ]; then
+      permission_error "create mutatingwebhookconfiguration"
+    fi
+    {{- end }}
+
+    # Setting this here on purpose, after the above section which explicitly checks
+    # for and handles exit errors.
+    set -e
+
+    echo -e "\n\e[1mInitializing the CA...\e[0m"
+
+    # CA password
+    {{- if .Values.ca.password }}
+    CA_PASSWORD={{ quote .Values.ca.password }}
+    {{- else }}
+    CA_PASSWORD=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32 ; echo '')
+    {{- end }}
+    # Provisioner password
+    {{- if .Values.ca.provisioner.password }}
+    CA_PROVISIONER_PASSWORD={{ quote .Values.ca.provisioner.password }}
+    {{- else }}
+    CA_PROVISIONER_PASSWORD=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32 ; echo '')
+    {{- end }}
+
+    TMP_CA_PASSWORD=$(mktemp /tmp/autocert.XXXXXX)
+    TMP_CA_PROVISIONER_PASSWORD=$(mktemp /tmp/autocert.XXXXXX)
+
+    echo $CA_PASSWORD > $TMP_CA_PASSWORD
+    echo $CA_PROVISIONER_PASSWORD > $TMP_CA_PROVISIONER_PASSWORD
+
+    step ca init \
+      --name "{{.Values.ca.name}}" \
+      --dns "{{include "step-certificates.dns" .}}" \
+      --address "{{.Values.ca.address}}" \
+      --provisioner "{{.Values.ca.provisioner.name}}" \
+      --with-ca-url "{{include "step-certificates.url" .}}" \
+      --password-file "$TMP_CA_PASSWORD" \
+      --provisioner-password-file "$TMP_CA_PROVISIONER_PASSWORD" {{ if not .Values.ca.db.enabled }}--no-db{{ end }}
+
+    rm -f $TMP_CA_PASSWORD $TMP_CA_PROVISIONER_PASSWORD
+
+    echo -e "\n\e[1mCreating configmaps and secrets in {{.Release.Namespace}} namespace ...\e[0m"
+
+    # Replace secrets created on helm install
+    # so they can be removed cleanly on helm delete
+    kbreplace -n {{.Release.Namespace}} create configmap {{ include "step-certificates.fullname" . }}-config --from-file $(step path)/config
+    kbreplace -n {{.Release.Namespace}} create configmap {{ include "step-certificates.fullname" . }}-certs --from-file $(step path)/certs
+    kbreplace -n {{.Release.Namespace}} create configmap {{ include "step-certificates.fullname" . }}-secrets --from-file $(step path)/secrets
+
+    kbreplace -n {{.Release.Namespace}} create secret generic {{ include "step-certificates.fullname" . }}-ca-password --from-literal "password=${CA_PASSWORD}"
+    kbreplace -n {{.Release.Namespace}} create secret generic {{ include "step-certificates.fullname" . }}-provisioner-password --from-literal "password=${CA_PROVISIONER_PASSWORD}"
+
+    # Label all configmaps and secrets
+    kubectl -n {{.Release.Namespace}} label configmap {{ include "step-certificates.fullname" . }}-config {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label configmap {{ include "step-certificates.fullname" . }}-certs {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label configmap {{ include "step-certificates.fullname" . }}-secrets {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label secret {{ include "step-certificates.fullname" . }}-ca-password {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label secret {{ include "step-certificates.fullname" . }}-provisioner-password {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+
+    # Patch webhook if autocert is enabled
+    {{ if .Values.autocert.enabled }}
+    CA_BUNDLE=$(cat $(step path)/certs/root_ca.crt | base64 | tr -d '\n')
+    kubectl patch mutatingwebhookconfigurations {{ .Release.Name }}-autocert-webhook-config \
+      --type json -p="[{\"op\":\"replace\",\"path\":\"/webhooks/0/clientConfig/caBundle\",\"value\":\"$CA_BUNDLE\"}]"
+    {{- end }}
+
+    echo -e "\n\e[1mStep Certificates installed!\e[0m"
+    echo
+    echo "CA URL: {{include "step-certificates.url" .}}"
+    echo "CA Fingerprint: $(step certificate fingerprint $(step path)/certs/root_ca.crt)"
+    echo
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/ingress.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/ingress.yaml
new file mode 100644
index 0000000..240bdaf
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/ingress.yaml
@@ -0,0 +1,57 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "step-certificates.fullname" . -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+  tls:
+  {{- range .Values.ingress.tls }}
+    - hosts:
+      {{- range .hosts }}
+        - {{ . | quote }}
+      {{- end }}
+      secretName: {{ .secretName }}
+  {{- end }}
+{{- end }}
+  rules:
+  {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+        {{- range .paths }}
+          - path: {{ . }}
+            backend:
+              serviceName: {{ $fullName }}
+              servicePort: http
+        {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/rbac.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/rbac.yaml
new file mode 100644
index 0000000..0534856
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/rbac.yaml
@@ -0,0 +1,93 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{- if .Release.IsInstall -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+rules:
+- apiGroups: [""]
+  resources: ["secrets", "configmaps"]
+  verbs: ["get", "create", "update", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+roleRef:
+  kind: Role
+  name: {{ include "step-certificates.fullname" . }}-config
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+rules:
+- apiGroups: ["admissionregistration.k8s.io"]
+  resources: ["mutatingwebhookconfigurations"]
+  verbs: ["get", "create", "update", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+roleRef:
+  kind: ClusterRole
+  name: {{ include "step-certificates.fullname" . }}-config
+  apiGroup: rbac.authorization.k8s.io
+{{- end -}}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/secrets.yaml
similarity index 67%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/secrets.yaml
index 951fdd7..68d0b8d 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/secrets.yaml
@@ -19,22 +19,21 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Secrets that will be updated by the configuration job:
+# 1. CA keys password.
+# 2. Provisioner password.
+apiVersion: v1
+data:
+  password: ""
+kind: Secret
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-ca-password
+  namespace: {{.Release.Namespace}}
+---
+apiVersion: v1
+data:
+  password: ""
+kind: Secret
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-provisioner-password
+  namespace: {{ .Release.Namespace }}
\ No newline at end of file
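Note: both Secrets are created with an empty password and are expected to be populated by the chart's configuration job (see the comment at the top of the file). Once that job has run, the generated CA password could be checked with something like this (a sketch; names again assume the dlab namespace and the step-certificates release):

    kubectl -n dlab get secret step-certificates-ca-password \
      -o jsonpath='{.data.password}' | base64 -d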
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/service.yaml
similarity index 63%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/service.yaml
index 951fdd7..dccae38 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/service.yaml
@@ -19,22 +19,22 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "step-certificates.fullname" . }}
+  labels:
+  {{ include "step-certificates.labels" . | indent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: {{ .Values.service.targetPort }}
+{{- if .Values.service.nodePort }}
+nodePort: {{ .Values.service.nodePort }}
+{{- end }}
+protocol: TCP
+name: https
+selector:
+  app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+  app.kubernetes.io/instance: {{ .Release.Name }}
\ No newline at end of file
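With the ports block fixed as above, the Service exposes step-ca on a NodePort (32433 in the values.yaml below). A smoke test against the CA's health endpoint might look like this (a sketch; <node-ip> stands in for any worker address, and -k is needed until the root certificate is trusted):

    curl -k https://<node-ip>:32433/health
    # a healthy step-ca answers with {"status":"ok"}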
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/tests/test-connection.yaml
similarity index 69%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/tests/test-connection.yaml
index 951fdd7..4fe296d 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/templates/tests/test-connection.yaml
@@ -19,22 +19,18 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "step-certificates.fullname" . }}-test-connection"
+  labels:
+  {{ include "step-certificates.labels" . | indent 4 }}
+annotations:
+  "helm.sh/hook": test-success
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args:  ['{{ include "step-certificates.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
\ No newline at end of file
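This Pod is a standard Helm test hook: it wgets the CA Service and succeeds only if the endpoint answers. With the Tiller-based Helm 2 setup used in this patch it would be run as:

    helm test step-certificates --cleanup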
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/values.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/values.yaml
new file mode 100644
index 0000000..14a3d3d
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-chart/values.yaml
@@ -0,0 +1,124 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+# Default values for step-certificates.
+
+# replicaCount is the number of replicas of step-certificates.
+# Only one replica is supported at this time.
+replicaCount: 1
+
+# nameOverride overrides the name of the chart.
+nameOverride: ""
+# fullnameOverride overrides the full name of the chart.
+fullnameOverride: ""
+
+# image contains the docker image for step-certificates.
+image:
+  repository: smallstep/step-ca
+  tag: 0.13.2
+  pullPolicy: IfNotPresent
+
+# bootstrapImage contains the docker image for the bootstrap of the configuration.
+bootstrapImage:
+  repository: smallstep/step-ca-bootstrap
+  tag: latest
+  pullPolicy: IfNotPresent
+
+# service contains configuration for the kubernetes service.
+service:
+  type: NodePort
+  nodePort: 32433
+  port: 443
+  targetPort: 9000
+
+# ca contains the certificate authority configuration.
+ca:
+  # name is the name of the new public key infrastructure (PKI).
+  name: dlab-step-ca
+  # address is the HTTP listener address of step-certificates.
+  address: :9000
+  # dns is the comma-separated DNS names to use. Leave it empty to use the format:
+  # {{ include "step-certificates.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local,127.0.0.1
+  dns: ${ssn_k8s_nlb_dns_name}
+  # url is the HTTPS URL where step-certificates will listen. Leave it empty to use the format:
+  # https://{{ include "step-certificates.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+  url: https://${ssn_k8s_nlb_dns_name}
+  # password is the password used to encrypt the keys. Leave it empty to generate a random one.
+  password: ${step_ca_password}
+  # provisioner contains the step-certificates provisioner configuration.
+  provisioner:
+    # name is the new provisioner name.
+    name: admin
+    # password is the password used to encrypt the provisioner private key.
+    password: ${step_ca_provisioner_password}
+  # db contains the step-certificates database configuration.
+  db:
+    # enabled defines if the database is enabled.
+    enabled: true
+    # persistent defines if a Persistent Volume Claim is used; if false, an emptyDir will be used.
+    persistent: true
+    # storageClass is the Persistent Volume Storage Class.
+    # If defined, storageClassName: <storageClass>.
+    # If set to "-", storageClassName: "", which disables dynamic provisioning.
+    # If undefined or set to null, no storageClassName spec is set, choosing the
+    # default provisioner (gp2 on AWS, standard on GKE, AWS & OpenStack).
+    storageClass: ${storage_class_name}
+    # accessModes defines the Persistent Volume Access Mode.
+    accessModes:
+      - ReadWriteOnce
+    # size is the Persistent Volume size.
+    size: 10Gi
+  # runAsRoot runs the ca as root instead of the step user. This is required by
+  # some storage provisioners.
+  runAsRoot: false
+
+# autocert is used to configure the autocert chart that depends on step-certificates.
+autocert:
+  enabled: false
+
+# ingress contains the configuration for an ingress controller.
+ingress:
+  enabled: false
+  annotations: {}
+  hosts: []
+  tls: []
+
+# resources contains the CPU/memory resource requests/limits.
+resources: {}
+  # We usually recommend not specifying default resources, leaving this as a conscious
+  # choice for the user. This also increases the chances the chart runs on environments
+  # with little resources, such as Minikube. If you do want to specify resources,
+  # uncomment the following lines, adjust them as necessary, and remove the curly
+  # braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+# nodeSelector contains the node labels for pod assignment.
+nodeSelector: {}
+
+# tolerations contains the toleration labels for pod assignment.
+tolerations: []
+
+# affinity contains the affinity settings for pod assignment.
+affinity: {}
\ No newline at end of file
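Note that ${ssn_k8s_nlb_dns_name}, ${step_ca_password}, ${step_ca_provisioner_password} and ${storage_class_name} are not Helm values: they are Terraform template placeholders, substituted by the template_file data source in step-ca.tf below before the file reaches helm_release. The rendering can be previewed outside Terraform with a throwaway substitution (illustrative values only, not part of the provisioning flow):

    sed -e 's|${ssn_k8s_nlb_dns_name}|nlb.example.com|g' \
        -e 's|${step_ca_password}|dummy-pass|g' \
        -e 's|${step_ca_provisioner_password}|dummy-pass|g' \
        -e 's|${storage_class_name}|gp2|g' \
        step-ca-chart/values.yaml > /tmp/step-ca-values.yaml
    helm template ./step-ca-chart --values /tmp/step-ca-values.yaml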
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/Chart.yaml
index 16da950..cbb683a 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: step-ca-issuer
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..66e3377
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "step-ca-issuer.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "step-ca-issuer.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "step-ca-issuer.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "step-ca-issuer.labels" -}}
+app.kubernetes.io/name: {{ include "step-ca-issuer.name" . }}
+helm.sh/chart: {{ include "step-ca-issuer.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/stepissuer.yaml
similarity index 71%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/stepissuer.yaml
index 951fdd7..caeeb92 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/templates/stepissuer.yaml
@@ -1,3 +1,4 @@
+{{- /*
 # *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -18,23 +19,19 @@
 # under the License.
 #
 # ******************************************************************************
+*/ -}}
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: certmanager.step.sm/v1beta1
+kind: StepIssuer
+metadata:
+  name: step-issuer
+  namespace: {{ .Values.namespace }}
+spec:
+  url: {{ .Values.ca_url }}
+  caBundle: {{ .Values.ca_bundle }}
+  provisioner:
+    name: {{ .Values.kid_name }}
+    kid: {{ .Values.kid }}
+    passwordRef:
+      name: step-certificates-provisioner-password
+      key: password
\ No newline at end of file
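Once cert-manager and the step-issuer controller reconcile this resource, the issuer should report a Ready condition. A quick check (a sketch, assuming the default dlab namespace):

    kubectl -n dlab get stepissuers.certmanager.step.sm step-issuer \
      -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'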
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/values.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/values.yaml
index 16da950..0cb4b94 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca-issuer-chart/values.yaml
@@ -19,19 +19,14 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+replicaCount: 1
 
+ingress:
+  enabled: false
+labels: {}
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+ca_url: ${step_ca_url}
+ca_bundle: ${step_ca_bundle}
+namespace: ${namespace}
+kid_name: ${step_ca_kid_name}
+kid: ${step_ca_kid}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca.tf
new file mode 100644
index 0000000..0361fa0
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-ca.tf
@@ -0,0 +1,52 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "step_ca_values" {
+  template = file("./step-ca-chart/values.yaml")
+  vars = {
+    storage_class_name           = kubernetes_storage_class.dlab-storage-class.metadata[0].name
+    ssn_k8s_nlb_dns_name         = var.ssn_k8s_nlb_dns_name
+    step_ca_password             = random_string.step_ca_password.result
+    step_ca_provisioner_password = random_string.step_ca_provisioner_password.result
+  }
+}
+
+resource "helm_release" "step_ca" {
+  name       = "step-certificates"
+  chart      = "./step-ca-chart"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  depends_on = [null_resource.cert_manager_delay]
+  wait       = false
+  timeout    = 600
+
+  values     = [
+    data.template_file.step_ca_values.rendered
+  ]
+}
+
+resource "null_resource" "step_ca_delay" {
+  provisioner "local-exec" {
+    command = "sleep 120"
+  }
+  triggers = {
+    "before" = helm_release.step_ca.name
+  }
+}
\ No newline at end of file
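The fixed 120-second sleep only has to outlast the CA bootstrap job so that step-issuer.tf can read the resulting ConfigMaps. If that ever proves flaky, a readiness poll would be a sturdier replacement (a sketch, assuming the chart deploys the CA as a StatefulSet named after the release):

    kubectl -n dlab rollout status statefulset/step-certificates --timeout=300s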
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/Chart.yaml
index 16da950..832b44c 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: step-issuer
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..9cd3910
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "step-issuer.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "step-issuer.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "step-issuer.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "step-issuer.labels" -}}
+app.kubernetes.io/name: {{ include "step-issuer.name" . }}
+helm.sh/chart: {{ include "step-issuer.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/deployment.yaml b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/deployment.yaml
new file mode 100644
index 0000000..c010d77
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/templates/deployment.yaml
@@ -0,0 +1,360 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: step-issuer-system
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: stepissuers.certmanager.step.sm
+spec:
+  group: certmanager.step.sm
+  names:
+    kind: StepIssuer
+    plural: stepissuers
+  scope: ""
+  validation:
+    openAPIV3Schema:
+      description: StepIssuer is the Schema for the stepissuers API
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          description: StepIssuerSpec defines the desired state of StepIssuer
+          properties:
+            caBundle:
+              description: CABundle is a base64 encoded TLS certificate used to verify
+                connections to the step certificates server. If not set the system
+                root certificates are used to validate the TLS connection.
+              format: byte
+              type: string
+            provisioner:
+              description: Provisioner contains the step certificates provisioner
+                configuration.
+              properties:
+                kid:
+                  description: KeyID is the kid property of the JWK provisioner.
+                  type: string
+                name:
+                  description: Name is the name of the JWK provisioner.
+                  type: string
+                passwordRef:
+                  description: PasswordRef is a reference to a Secret containing the
+                    provisioner password used to decrypt the provisioner private key.
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must be a
+                        valid secret key.
+                      type: string
+                    name:
+                      description: The name of the secret in the pod's namespace to
+                        select from.
+                      type: string
+                  required:
+                  - name
+                  type: object
+              required:
+              - kid
+              - name
+              - passwordRef
+              type: object
+            url:
+              description: URL is the base URL for the step certificates instance.
+              type: string
+          required:
+          - provisioner
+          - url
+          type: object
+        status:
+          description: StepIssuerStatus defines the observed state of StepIssuer
+          properties:
+            conditions:
+              items:
+                description: StepIssuerCondition contains condition information for
+                  the step issuer.
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    enum:
+                    - Ready
+                    type: string
+                required:
+                - status
+                - type
+                type: object
+              type: array
+          type: object
+      type: object
+  versions:
+  - name: v1beta1
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: step-issuer-leader-election-role
+  namespace: step-issuer-system
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps/status
+  verbs:
+  - get
+  - update
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: step-issuer-manager-role
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - certmanager.k8s.io
+  resources:
+  - certificaterequests
+  verbs:
+  - get
+  - list
+  - update
+  - watch
+- apiGroups:
+  - certmanager.k8s.io
+  resources:
+  - certificaterequests/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - certmanager.step.sm
+  resources:
+  - stepissuers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - certmanager.step.sm
+  resources:
+  - stepissuers/status
+  verbs:
+  - get
+  - patch
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: step-issuer-proxy-role
+rules:
+- apiGroups:
+  - authentication.k8s.io
+  resources:
+  - tokenreviews
+  verbs:
+  - create
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - subjectaccessreviews
+  verbs:
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: step-issuer-leader-election-rolebinding
+  namespace: step-issuer-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: step-issuer-leader-election-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: step-issuer-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: step-issuer-manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: step-issuer-manager-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: step-issuer-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: step-issuer-proxy-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: step-issuer-proxy-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: step-issuer-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "8443"
+    prometheus.io/scheme: https
+    prometheus.io/scrape: "true"
+  labels:
+    control-plane: controller-manager
+  name: step-issuer-controller-manager-metrics-service
+  namespace: step-issuer-system
+spec:
+  ports:
+  - name: https
+    port: 8443
+    targetPort: https
+  selector:
+    control-plane: controller-manager
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: step-issuer-controller-manager
+  namespace: step-issuer-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      control-plane: controller-manager
+  template:
+    metadata:
+      labels:
+        control-plane: controller-manager
+    spec:
+      containers:
+      - args:
+        - --secure-listen-address=0.0.0.0:8443
+        - --upstream=http://127.0.0.1:8080/
+        - --logtostderr=true
+        - --v=10
+        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
+        name: kube-rbac-proxy
+        ports:
+        - containerPort: 8443
+          name: https
+      - args:
+        - --metrics-addr=127.0.0.1:8080
+        - --enable-leader-election
+        command:
+        - /manager
+        image: smallstep/step-issuer:0.1.0
+        name: manager
+        resources:
+          limits:
+            cpu: 100m
+            memory: 30Mi
+          requests:
+            cpu: 100m
+            memory: 20Mi
+      terminationGracePeriodSeconds: 10
\ No newline at end of file
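This manifest bundles the namespace, CRD, RBAC, metrics Service, and the controller Deployment (manager plus kube-rbac-proxy sidecar) into a single template. To confirm the controller came up and is reconciling StepIssuer resources (a sketch):

    kubectl -n step-issuer-system get pods
    kubectl -n step-issuer-system logs deploy/step-issuer-controller-manager -c manager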
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/values.yaml
similarity index 77%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/values.yaml
index d0cfc24..0c6d2cf 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer-chart/values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,10 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+replicaCount: 1
+
+ingress:
+  enabled: false
+labels: {}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer.tf
new file mode 100644
index 0000000..8525652
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/step-issuer.tf
@@ -0,0 +1,80 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "step_issuer_values" {
+  template = file("./step-issuer-chart/values.yaml")
+}
+
+resource "helm_release" "step-issuer" {
+    name       = "step-issuer"
+    chart      = "./step-issuer-chart"
+    wait       = true
+    depends_on = [null_resource.step_ca_delay]
+
+    values     = [
+        data.template_file.step_issuer_values.rendered
+    ]
+}
+
+resource "null_resource" "step_issuer_delay" {
+  provisioner "local-exec" {
+    command = "sleep 120"
+  }
+  triggers = {
+    "before" = helm_release.step-issuer.name
+  }
+}
+
+data "template_file" "step_ca_issuer_values" {
+  template = file("./step-ca-issuer-chart/values.yaml")
+  vars     = {
+    step_ca_url      = "https://${var.ssn_k8s_nlb_dns_name}:443"
+    step_ca_bundle   = lookup(data.external.step-ca-config-values.result, "rootCa")
+    namespace        = kubernetes_namespace.dlab-namespace.metadata[0].name
+    step_ca_kid_name = lookup(data.external.step-ca-config-values.result, "kidName")
+    step_ca_kid      = lookup(data.external.step-ca-config-values.result, "kid")
+  }
+}
+
+resource "helm_release" "step-ca-issuer" {
+    name       = "step-ca-issuer"
+    chart      = "./step-ca-issuer-chart"
+    wait       = true
+    depends_on = [null_resource.step_issuer_delay]
+
+    values     = [
+        data.template_file.step_ca_issuer_values.rendered
+    ]
+}
+
+resource "null_resource" "step_ca_issuer_delay" {
+  provisioner "local-exec" {
+    command = "sleep 60"
+  }
+  triggers = {
+    "before" = helm_release.step-ca-issuer.name
+  }
+}
+
+data "external" "step-ca-config-values" {
+  program     = ["sh", "/tmp/get_configmap_values.sh"]
+  depends_on  = [null_resource.step_issuer_delay]
+}
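The external data source contract is worth spelling out: Terraform executes /tmp/get_configmap_values.sh (written onto the master by masters-user-data.sh below) and expects a single JSON object on stdout, whose keys are then consumed via lookup(). A minimal stand-in with the same shape (hypothetical values):

    #!/bin/sh
    jq -n '{rootCa: "BASE64-ENCODED-CA", kid: "PROVISIONER-KID", kidName: "admin"}'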
diff --git a/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/variables.tf b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/variables.tf
new file mode 100644
index 0000000..dcc5620
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-helm-charts/main/variables.tf
@@ -0,0 +1,195 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "namespace_name" {
+    default = "dlab"
+}
+
+variable "ssn_k8s_nlb_dns_name" {
+    default = ""
+}
+
+variable "keycloak_user" {
+    default = "dlab-admin"
+}
+
+variable "mysql_keycloak_user" {
+    default = "keycloak"
+}
+
+variable "mysql_keycloak_db_name" {
+    default = "keycloak"
+}
+
+variable "mysql_disk_size" {
+    default = "10"
+}
+
+variable "ldap_usernameAttr" {
+    default = "uid"
+}
+
+variable "ldap_rdnAttr" {
+    default = "uid"
+}
+
+variable "ldap_uuidAttr" {
+    default = "uid"
+}
+
+variable "ldap_users_group" {
+    default = "ou=People"
+}
+
+variable "ldap_dn" {
+    default = "dc=example,dc=com"
+}
+
+variable "ldap_user" {
+    default = "cn=admin"
+}
+
+variable "ldap_bind_creds" {
+    default = ""
+}
+
+variable "ldap_host" {
+    default = ""
+}
+
+variable "mongo_db_username" {
+    default = "admin"
+}
+
+variable "mongo_dbname" {
+    default = "dlabdb"
+}
+
+variable "mongo_image_tag" {
+    default = "4.0.10-debian-9-r13"
+    description = "MongoDB Image tag"
+}
+
+variable "mongo_service_port" {
+    default = "27017"
+}
+
+variable "mongo_node_port" {
+    default = "31017"
+}
+
+variable "mongo_service_name" {
+    default = "mongo-ha-mongodb"
+}
+
+variable "ssn_k8s_workers_count" {
+    default = "2"
+}
+
+//variable "endpoint_eip_address" {}
+
+variable "service_base_name" {
+    default = ""
+}
+
+variable "tag_resource_id" {
+    default = ""
+}
+
+variable "billing_bucket" {
+    default = ""
+}
+
+variable "billing_bucket_path" {
+    default = ""
+}
+
+variable "billing_aws_job_enabled" {
+    default = "false"
+}
+
+variable "billing_aws_account_id" {
+    default = ""
+}
+
+variable "billing_tag" {
+    default = "dlab"
+}
+
+variable "billing_dlab_id" {
+    default = "resource_tags_user_user_tag"
+}
+
+variable "billing_usage_date" {
+    default = "line_item_usage_start_date"
+}
+
+variable "billing_product" {
+    default = "product_product_name"
+}
+
+variable "billing_usage_type" {
+    default = "line_item_usage_type"
+}
+
+variable "billing_usage" {
+    default = "line_item_usage_amount"
+}
+
+variable "billing_cost" {
+    default = "line_item_blended_cost"
+}
+
+variable "billing_resource_id" {
+    default = "line_item_resource_id"
+}
+
+variable "billing_tags" {
+    default = "line_item_operation,line_item_line_item_description"
+}
+
+variable "env_os" {
+    default = "debian"
+}
+
+variable "custom_certs_enabled" {
+    default = "False"
+}
+
+variable "custom_cert_path" {
+    default = ""
+}
+
+variable "custom_key_path" {
+    default = ""
+}
+
+variable "custom_certs_host" {
+    default = ""
+}
+
+variable "keycloak_realm_name" {
+  default = "dlab"
+}
+
+variable "keycloak_client_id" {
+  default = "dlab-ui"
+}
\ No newline at end of file
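Every variable here carries a default, so a deployment only needs to override the handful that differ from them, e.g. (illustrative values):

    terraform apply \
      -var 'service_base_name=dlab-test' \
      -var 'ssn_k8s_nlb_dns_name=my-nlb.elb.amazonaws.com' \
      -var 'ldap_host=ldap.example.com'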
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
new file mode 100644
index 0000000..41dfb20
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
@@ -0,0 +1,184 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  subnet_c_id                      = data.aws_subnet.k8s-subnet-c-data == [] ? "" : data.aws_subnet.k8s-subnet-c-data.0.id
+  ssn_k8s_launch_conf_masters_name = "${var.service_base_name}-ssn-lc-masters"
+  ssn_k8s_launch_conf_workers_name = "${var.service_base_name}-ssn-lc-workers"
+  ssn_k8s_ag_masters_name          = "${var.service_base_name}-ssn-masters"
+  ssn_k8s_ag_workers_name          = "${var.service_base_name}-ssn-workers"
+  cluster_name                     = "${var.service_base_name}-k8s-cluster"
+}
+
+data "template_file" "ssn_k8s_masters_user_data" {
+  template = file("./files/masters-user-data.sh")
+  vars = {
+    k8s-asg                    = local.ssn_k8s_ag_masters_name
+    k8s-region                 = var.region
+    k8s-bucket-name            = aws_s3_bucket.ssn_k8s_bucket.id
+    k8s-nlb-dns-name           = aws_lb.ssn_k8s_nlb.dns_name
+    k8s-tg-arn                 = aws_lb_target_group.ssn_k8s_nlb_api_target_group.arn
+    k8s_os_user                = var.os_user
+    kubernetes_version         = var.kubernetes_version
+    cluster_name               = local.cluster_name
+  }
+}
+
+data "template_file" "ssn_k8s_workers_user_data" {
+  template = file("./files/workers-user-data.sh")
+  vars = {
+    k8s-bucket-name    = aws_s3_bucket.ssn_k8s_bucket.id
+    k8s_os_user        = var.os_user
+    kubernetes_version = var.kubernetes_version
+    k8s-nlb-dns-name   = aws_lb.ssn_k8s_nlb.dns_name
+  }
+}
+
+resource "aws_launch_configuration" "ssn_k8s_launch_conf_masters" {
+  name                 = local.ssn_k8s_launch_conf_masters_name
+  image_id             = var.ami
+  instance_type        = var.ssn_k8s_masters_shape
+  key_name             = var.key_name
+  security_groups      = [aws_security_group.ssn_k8s_sg.id]
+  iam_instance_profile = aws_iam_instance_profile.k8s-profile.name
+  root_block_device {
+    volume_type           = "gp2"
+    volume_size           = var.ssn_root_volume_size
+    delete_on_termination = true
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+  user_data = data.template_file.ssn_k8s_masters_user_data.rendered
+}
+
+resource "aws_launch_configuration" "ssn_k8s_launch_conf_workers" {
+  name                 = local.ssn_k8s_launch_conf_workers_name
+  image_id             = var.ami
+  instance_type        = var.ssn_k8s_workers_shape
+  key_name             = var.key_name
+  security_groups      = [aws_security_group.ssn_k8s_sg.id]
+  iam_instance_profile = aws_iam_instance_profile.k8s-profile.name
+  root_block_device {
+    volume_type           = "gp2"
+    volume_size           = var.ssn_root_volume_size
+    delete_on_termination = true
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+  user_data = data.template_file.ssn_k8s_workers_user_data.rendered
+}
+
+resource "aws_autoscaling_group" "ssn_k8s_autoscaling_group_masters" {
+  name                 = local.ssn_k8s_ag_masters_name
+  launch_configuration = aws_launch_configuration.ssn_k8s_launch_conf_masters.name
+  min_size             = var.ssn_k8s_masters_count
+  max_size             = var.ssn_k8s_masters_count
+  vpc_zone_identifier  = compact([data.aws_subnet.k8s-subnet-a-data.id, data.aws_subnet.k8s-subnet-b-data.id,
+                                  local.subnet_c_id])
+  target_group_arns    = [aws_lb_target_group.ssn_k8s_nlb_api_target_group.arn,
+                          # aws_lb_target_group.ssn_k8s_nlb_ss_target_group.arn,
+                          # aws_lb_target_group.ssn_k8s_alb_target_group.arn,
+                          aws_lb_target_group.ssn_k8s_nlb_step_ca_target_group.arn]
+
+  lifecycle {
+    create_before_destroy = true
+  }
+  tags = [
+    {
+      key                 = "Name"
+      value               = local.ssn_k8s_ag_masters_name
+      propagate_at_launch = true
+    },
+    {
+      key                 = local.additional_tag[0]
+      value               = local.additional_tag[1]
+      propagate_at_launch = true
+    },
+    {
+      key                 = var.tag_resource_id
+      value               = "${var.service_base_name}:${local.ssn_k8s_ag_masters_name}"
+      propagate_at_launch = true
+    },
+    {
+      key                 = "${var.service_base_name}-tag"
+      value               = local.ssn_k8s_ag_masters_name
+      propagate_at_launch = true
+    },
+    {
+      key                 = "kubernetes.io/cluster/${local.cluster_name}"
+      value               = "owned"
+      propagate_at_launch = true
+    }
+  ]
+}
+
+resource "aws_autoscaling_group" "ssn_k8s_autoscaling_group_workers" {
+  name                 = local.ssn_k8s_ag_workers_name
+  launch_configuration = aws_launch_configuration.ssn_k8s_launch_conf_workers.name
+  min_size             = var.ssn_k8s_workers_count
+  max_size             = var.ssn_k8s_workers_count
+  vpc_zone_identifier  = compact([data.aws_subnet.k8s-subnet-a-data.id, data.aws_subnet.k8s-subnet-b-data.id,
+                                  local.subnet_c_id])
+
+  lifecycle {
+    create_before_destroy = true
+  }
+  tags = [
+    {
+      key                 = "Name"
+      value               = local.ssn_k8s_ag_workers_name
+      propagate_at_launch = true
+    },
+    {
+      key                 = local.additional_tag[0]
+      value               = local.additional_tag[1]
+      propagate_at_launch = true
+    },
+    {
+      key                 = var.tag_resource_id
+      value               = "${var.service_base_name}:${local.ssn_k8s_ag_workers_name}"
+      propagate_at_launch = true
+    },
+    {
+      key                 = "${var.service_base_name}-tag"
+      value               = local.ssn_k8s_ag_workers_name
+      propagate_at_launch = true
+    },
+    {
+      key                 = "kubernetes.io/cluster/${local.cluster_name}"
+      value               = "owned"
+      propagate_at_launch = true
+    }
+  ]
+}
+
+data "aws_instances" "ssn_k8s_masters_instances" {
+  instance_tags = {
+    Name = aws_autoscaling_group.ssn_k8s_autoscaling_group_masters.name
+  }
+
+  instance_state_names = ["running"]
+  depends_on = [aws_autoscaling_group.ssn_k8s_autoscaling_group_masters]
+}
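The kubernetes.io/cluster/<cluster_name>=owned tag on both groups is what the AWS cloud provider (enabled with cloud-provider: aws in the user data below) uses to associate instances with the cluster. The same tag works for listing the members by hand (a sketch with illustrative region and cluster name):

    aws ec2 describe-instances --region us-east-1 \
      --filters "Name=tag-key,Values=kubernetes.io/cluster/dlab-test-k8s-cluster" \
                "Name=instance-state-name,Values=running" \
      --query 'Reservations[].Instances[].PrivateIpAddress'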
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/masters-user-data.sh b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/masters-user-data.sh
new file mode 100644
index 0000000..8617b6f
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/masters-user-data.sh
@@ -0,0 +1,240 @@
+#!/bin/bash
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+set -ex
+
+check_tokens () {
+RUN=$(aws s3 ls s3://${k8s-bucket-name}/k8s/masters/ > /dev/null && echo "true" || echo "false")
+sleep 5
+}
+
+check_elb_status () {
+RUN=$(aws elbv2 describe-target-health --target-group-arn ${k8s-tg-arn} --region ${k8s-region} | \
+     jq -r '.TargetHealthDescriptions[].TargetHealth.State' | \
+     grep "^healthy" > /dev/null && echo "true" || echo "false")
+sleep 5
+}
+
+# Creating DLab user
+sudo useradd -m -G sudo -s /bin/bash ${k8s_os_user}
+sudo bash -c 'echo "${k8s_os_user} ALL = NOPASSWD:ALL" >> /etc/sudoers'
+sudo mkdir /home/${k8s_os_user}/.ssh
+sudo bash -c 'cat /home/ubuntu/.ssh/authorized_keys > /home/${k8s_os_user}/.ssh/authorized_keys'
+sudo chown -R ${k8s_os_user}:${k8s_os_user} /home/${k8s_os_user}/
+sudo chmod 700 /home/${k8s_os_user}/.ssh
+sudo chmod 600 /home/${k8s_os_user}/.ssh/authorized_keys
+
+sudo apt-get update
+sudo apt-get install -y python-pip jq unzip
+sudo apt-get install -y default-jre
+sudo apt-get install -y default-jdk
+sudo pip install -U pip
+sudo pip install awscli
+
+local_ip=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
+full_hostname=$(curl http://169.254.169.254/latest/meta-data/hostname)
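+# Elect the bootstrap master: the ASG member whose private IP sorts first
+# initializes the cluster; every other master waits and joins via the token
+# published to S3 below.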
+first_master_ip=$(aws autoscaling describe-auto-scaling-instances --region ${k8s-region} --output text --query \
+                 "AutoScalingInstances[?AutoScalingGroupName=='${k8s-asg}'].InstanceId" | tr '\t' '\n' | \
+                 xargs -I{} aws ec2 describe-instances --instance-ids {} --region ${k8s-region} --query \
+                 "Reservations[].Instances[].PrivateIpAddress" --output text | sort | head -n1)
+
+# installing Docker
+sudo bash -c 'curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -'
+sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce
+sudo systemctl enable docker
+# installing kubeadm, kubelet and kubectl
+sudo apt-get install -y apt-transport-https curl
+sudo bash -c 'curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -'
+sudo bash -c 'echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list'
+sudo apt-get update
+sudo apt-get install -y kubelet=${kubernetes_version} kubeadm=${kubernetes_version} kubectl=${kubernetes_version}
+
+check_tokens
+if [[ $local_ip == "$first_master_ip" ]] && [[ $RUN == "false" ]]; then
+cat <<EOF > /tmp/kubeadm-config.yaml
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+kubernetesVersion: stable
+apiServer:
+  certSANs:
+  - ${k8s-nlb-dns-name}
+  extraArgs:
+    cloud-provider: aws
+controllerManager:
+  extraArgs:
+    cloud-provider: aws
+    configure-cloud-routes: "false"
+controlPlaneEndpoint: "${k8s-nlb-dns-name}:6443"
+clusterName: "${cluster_name}"
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+nodeRegistration:
+  kubeletExtraArgs:
+    cloud-provider: aws
+EOF
+sudo kubeadm init --config=/tmp/kubeadm-config.yaml --upload-certs --node-name $full_hostname
+while check_elb_status
+do
+    if [[ $RUN == "false" ]];
+    then
+        echo "Waiting for NLB healthy status..."
+    else
+        echo "LB status is healthy!"
+        break
+    fi
+done
+sudo mkdir -p /home/${k8s_os_user}/.kube
+sudo cp -i /etc/kubernetes/admin.conf /home/${k8s_os_user}/.kube/config
+sudo chown -R ${k8s_os_user}:${k8s_os_user} /home/${k8s_os_user}/.kube
+sudo kubeadm token create --print-join-command > /tmp/join_command
+sudo kubeadm init phase upload-certs --upload-certs | grep -v "upload-certs" > /tmp/cert_key
+sudo -i -u ${k8s_os_user} kubectl apply -f \
+     "https://cloud.weave.works/k8s/net?k8s-version=$(sudo -i -u ${k8s_os_user} kubectl version | base64 | tr -d '\n')"
+sudo -i -u ${k8s_os_user} bash -c 'curl -L https://git.io/get_helm.sh | bash'
+cat <<EOF > /tmp/rbac-config.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: tiller
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: tiller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: tiller
+    namespace: kube-system
+EOF
+sudo -i -u ${k8s_os_user} kubectl create -f /tmp/rbac-config.yaml
+sudo -i -u ${k8s_os_user} helm init --service-account tiller --history-max 200
+sleep 60
+aws s3 cp /tmp/join_command s3://${k8s-bucket-name}/k8s/masters/join_command
+aws s3 cp /tmp/cert_key s3://${k8s-bucket-name}/k8s/masters/cert_key
+sudo rm -f /tmp/join_command
+sudo rm -f /tmp/cert_key
+cat <<EOF > /tmp/get_configmap_values.sh
+#!/bin/bash
+
+ROOT_CA=\$(kubectl get -o jsonpath="{.data['root_ca\.crt']}" configmaps/step-certificates-certs -ndlab | base64 | tr -d '\n')
+KID=\$(kubectl get -o jsonpath="{.data['ca\.json']}" configmaps/step-certificates-config -ndlab | jq -r .authority.provisioners[].key.kid)
+KID_NAME=\$(kubectl get -o jsonpath="{.data['ca\.json']}" configmaps/step-certificates-config -ndlab | jq -r .authority.provisioners[].name)
+jq -n --arg rootCa "\$ROOT_CA" --arg kid "\$KID" --arg kidName "\$KID_NAME" '{rootCa: \$rootCa, kid: \$kid, kidName: \$kidName}'
+EOF
+chown ${k8s_os_user}:${k8s_os_user} /tmp/get_configmap_values.sh
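+# Hypothetical invocation once step-certificates is deployed in the dlab
+# namespace: running /tmp/get_configmap_values.sh prints a JSON object with
+# rootCa (the base64-encoded CA certificate), kid and kidName taken from the
+# step-ca provisioner config.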
+else
+while check_tokens
+do
+    if [[ $RUN == "false" ]];
+    then
+        echo "Waiting for initial cluster initialization..."
+    else
+        echo "Initial cluster initialized!"
+        break
+    fi
+done
+cat <<EOF > /tmp/node.yaml
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+controlPlane:
+  localAPIEndpoint:
+    advertiseAddress: LOCAL_IP
+  certificateKey: "CERT_KEY"
+discovery:
+  bootstrapToken:
+    apiServerEndpoint: ${k8s-nlb-dns-name}:6443
+    caCertHashes:
+    - 'HASHES'
+    token: TOKEN
+  tlsBootstrapToken: TOKEN
+kind: JoinConfiguration
+nodeRegistration:
+  kubeletExtraArgs:
+    cloud-provider: aws
+  name: NODE_NAME
+EOF
+aws s3 cp s3://${k8s-bucket-name}/k8s/masters/join_command /tmp/join_command
+aws s3 cp s3://${k8s-bucket-name}/k8s/masters/cert_key /tmp/cert_key
+cert_key=$(cat /tmp/cert_key)
+token=$(cat /tmp/join_command | sed 's/--\+/\n/g' | grep "token " | awk '{print $2}')
+hashes=$(cat /tmp/join_command | sed 's/--\+/\n/g' | grep "discovery-token-ca-cert-hash" | awk '{print $2}')
+sed -i "s/NODE_NAME/$full_hostname/g" /tmp/node.yaml
+sed -i "s/TOKEN/$token/g" /tmp/node.yaml
+sed -i "s/HASHES/$hashes/g" /tmp/node.yaml
+sed -i "s/CERT_KEY/$cert_key/g" /tmp/node.yaml
+sed -i "s/LOCAL_IP/$local_ip/g" /tmp/node.yaml
+sudo kubeadm join --config /tmp/node.yaml
+sudo mkdir -p /home/${k8s_os_user}/.kube
+sudo cp -i /etc/kubernetes/admin.conf /home/${k8s_os_user}/.kube/config
+sudo chown -R ${k8s_os_user}:${k8s_os_user} /home/${k8s_os_user}/.kube
+sudo -i -u ${k8s_os_user} bash -c 'curl -L https://git.io/get_helm.sh | bash'
+sudo -i -u ${k8s_os_user} helm init --client-only --history-max 200
+fi
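+# kubeadm bootstrap tokens expire after 24h by default and uploaded control
+# plane certificates after 2h, so the helper below regenerates both and
+# re-uploads them to S3; the daily cron entry added after it keeps the bucket
+# copies usable for late-joining nodes.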
+cat <<EOF > /tmp/update_files.sh
+#!/bin/bash
+sudo kubeadm token create --print-join-command > /tmp/join_command
+sudo kubeadm init phase upload-certs --upload-certs | grep -v "upload-certs" > /tmp/cert_key
+aws s3 cp /tmp/join_command s3://${k8s-bucket-name}/k8s/masters/join_command
+aws s3 cp /tmp/cert_key s3://${k8s-bucket-name}/k8s/masters/cert_key
+sudo rm -f /tmp/join_command
+sudo rm -f /tmp/cert_key
+EOF
+sudo mv /tmp/update_files.sh /usr/local/bin/update_files.sh
+sudo chmod 755 /usr/local/bin/update_files.sh
+sudo bash -c 'echo "0 0 * * * root /usr/local/bin/update_files.sh" >> /etc/crontab'
+
+#cat <<EOF > /tmp/remove-etcd-member.sh
+##!/bin/bash
+#hostname=\$(/bin/hostname)
+#not_ready_node=\$(/usr/bin/sudo -i -u ${k8s_os_user} /usr/bin/kubectl get nodes | grep NotReady | grep master | awk '{print \$1}')
+#if [[ \$not_ready_node != "" ]]; then
+#etcd_pod_name=\$(/usr/bin/sudo -i -u ${k8s_os_user} /usr/bin/kubectl get pods -n kube-system | /bin/grep etcd \
+#    | /bin/grep "\$hostname" | /usr/bin/awk '{print \$1}')
+#etcd_member_id=\$(/usr/bin/sudo -i -u ${k8s_os_user} /usr/bin/kubectl -n kube-system exec -it \$etcd_pod_name \
+#    -- /bin/sh -c "ETCDCTL_API=3 etcdctl member list --endpoints=https://[127.0.0.1]:2379 \
+#    --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
+#    --key=/etc/kubernetes/pki/etcd/healthcheck-client.key"  | /bin/grep ", \$not_ready_node" | /usr/bin/awk -F',' '{print \$1}')
+#/usr/bin/sudo -i -u ${k8s_os_user} /usr/bin/kubectl -n kube-system exec -it \$etcd_pod_name \
+#    -- /bin/sh -c "ETCDCTL_API=3 etcdctl member remove \$etcd_member_id --endpoints=https://[127.0.0.1]:2379 \
+#    --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
+#    --key=/etc/kubernetes/pki/etcd/healthcheck-client.key"
+#/usr/bin/sudo -i -u ${k8s_os_user} /usr/bin/kubectl delete node \$not_ready_node
+#
+#fi
+#
+#EOF
+# sudo mv /tmp/remove-etcd-member.sh /usr/local/bin/remove-etcd-member.sh
+# sudo chmod 755 /usr/local/bin/remove-etcd-member.sh
+# sleep 300
+# sudo bash -c 'echo "* * * * * root /usr/local/bin/remove-etcd-member.sh >> /var/log/cron_k8s.log 2>&1" >> /etc/crontab'
+sudo -i -u ${k8s_os_user} helm repo update
+wget https://releases.hashicorp.com/terraform/0.12.12/terraform_0.12.12_linux_amd64.zip -O /tmp/terraform_0.12.12_linux_amd64.zip
+unzip /tmp/terraform_0.12.12_linux_amd64.zip -d /tmp/
+sudo mv /tmp/terraform /usr/local/bin/
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/ssn-policy.json.tpl b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/ssn-policy.json.tpl
new file mode 100644
index 0000000..e197744
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/ssn-policy.json.tpl
@@ -0,0 +1,42 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+        "Action": [
+            "s3:CreateBucket",
+            "s3:ListAllMyBuckets",
+            "s3:GetBucketLocation",
+            "s3:GetBucketTagging",
+            "s3:PutBucketTagging",
+            "s3:PutBucketPolicy",
+            "s3:GetBucketPolicy",
+            "s3:DeleteBucket",
+            "s3:DeleteObject",
+            "s3:GetObject",
+            "s3:ListBucket",
+            "s3:PutObject",
+            "s3:PutEncryptionConfiguration"
+        ],
+        "Effect": "Allow",
+        "Resource": "*"
+    },
+    {
+        "Effect": "Allow",
+        "Action": [
+            "autoscaling:DescribeAutoScalingInstances",
+            "ec2:DescribeInstances",
+            "elasticloadbalancing:DescribeTargetHealth",
+            "elasticloadbalancing:*",
+            "ec2:*"
+        ],
+        "Resource": "*"
+    },
+    {
+        "Action": [
+            "pricing:GetProducts"
+        ],
+        "Effect": "Allow",
+        "Resource": "*"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/workers-user-data.sh b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/workers-user-data.sh
new file mode 100644
index 0000000..04a8f57
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/files/workers-user-data.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+set -e
+
+check_tokens () {
+RUN=$(aws s3 ls s3://${k8s-bucket-name}/k8s/masters/ > /dev/null && echo "true" || echo "false")
+sleep 5
+}
+
+# Creating DLab user
+sudo useradd -m -G sudo -s /bin/bash ${k8s_os_user}
+sudo bash -c 'echo "${k8s_os_user} ALL = NOPASSWD:ALL" >> /etc/sudoers'
+sudo mkdir /home/${k8s_os_user}/.ssh
+sudo bash -c 'cat /home/ubuntu/.ssh/authorized_keys > /home/${k8s_os_user}/.ssh/authorized_keys'
+sudo chown -R ${k8s_os_user}:${k8s_os_user} /home/${k8s_os_user}/
+sudo chmod 700 /home/${k8s_os_user}/.ssh
+sudo chmod 600 /home/${k8s_os_user}/.ssh/authorized_keys
+
+sudo apt-get update
+sudo apt-get install -y python-pip
+sudo pip install -U pip
+sudo pip install awscli
+
+# installing Docker
+sudo bash -c 'curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -'
+sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce
+sudo systemctl enable docker
+# installing kubeadm, kubelet and kubectl
+sudo apt-get install -y apt-transport-https curl
+sudo bash -c 'curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -'
+sudo bash -c 'echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list'
+sudo apt-get update
+sudo apt-get install -y kubelet=${kubernetes_version} kubeadm=${kubernetes_version} kubectl=${kubernetes_version}
+while check_tokens
+do
+    if [[ $RUN == "false" ]];
+    then
+        echo "Waiting for initial cluster initialization..."
+    else
+        echo "Initial cluster initialized!"
+        break
+    fi
+done
+
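+# Workers render a plain JoinConfiguration (no controlPlane section, unlike the
+# masters) and fill the placeholders below from the join command stored in S3.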
+cat <<EOF > /tmp/node.yaml
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+discovery:
+  bootstrapToken:
+    apiServerEndpoint: ${k8s-nlb-dns-name}:6443
+    caCertHashes:
+    - HASHES
+    token: TOKEN
+  tlsBootstrapToken: TOKEN
+kind: JoinConfiguration
+nodeRegistration:
+  kubeletExtraArgs:
+    cloud-provider: aws
+  name: NODE_NAME
+EOF
+aws s3 cp s3://${k8s-bucket-name}/k8s/masters/join_command /tmp/join_command
+token=$(cat /tmp/join_command | sed 's/--\+/\n/g' | grep "token " | awk '{print $2}')
+hashes=$(cat /tmp/join_command | sed 's/--\+/\n/g' | grep "discovery-token-ca-cert-hash" | awk '{print $2}')
+full_hostname=$(curl http://169.254.169.254/latest/meta-data/hostname)
+sed -i "s/NODE_NAME/$full_hostname/g" /tmp/node.yaml
+sed -i "s/TOKEN/$token/g" /tmp/node.yaml
+sed -i "s/HASHES/$hashes/g" /tmp/node.yaml
+sudo kubeadm join --config /tmp/node.yaml
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
new file mode 100644
index 0000000..bd0baf8
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
@@ -0,0 +1,92 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  ssn_nlb_name                 = "${var.service_base_name}-ssn-nlb"
+  ssn_alb_name                 = "${var.service_base_name}-ssn-alb"
+  ssn_k8s_nlb_api_tg_name      = "${var.service_base_name}-ssn-nlb-api-tg"
+  ssn_k8s_nlb_step_ca_tg_name  = "${var.service_base_name}-ssn-nlb-step-ca-tg"
+  ssn_k8s_alb_tg_name          = "${var.service_base_name}-ssn-alb-tg"
+}
+
+resource "aws_lb" "ssn_k8s_nlb" {
+  name               = local.ssn_nlb_name
+  load_balancer_type = "network"
+  subnets            = compact([data.aws_subnet.k8s-subnet-a-data.id, data.aws_subnet.k8s-subnet-b-data.id,
+                                local.subnet_c_id])
+  tags               = {
+    Name                                          = local.ssn_nlb_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_nlb_name}"
+    "${var.service_base_name}-tag"                = local.ssn_nlb_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_lb_target_group" "ssn_k8s_nlb_api_target_group" {
+  name     = local.ssn_k8s_nlb_api_tg_name
+  port     = 6443
+  protocol = "TCP"
+  vpc_id   = data.aws_vpc.ssn_k8s_vpc_data.id
+  tags     = {
+    Name                                          = local.ssn_k8s_nlb_api_tg_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_k8s_nlb_api_tg_name}"
+    "${var.service_base_name}-tag"                = local.ssn_k8s_nlb_api_tg_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_lb_target_group" "ssn_k8s_nlb_step_ca_target_group" {
+  name     = local.ssn_k8s_nlb_step_ca_tg_name
+  port     = 32433
+  protocol = "TCP"
+  vpc_id   = data.aws_vpc.ssn_k8s_vpc_data.id
+  tags     = {
+    Name                                          = local.ssn_k8s_nlb_step_ca_tg_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_k8s_nlb_step_ca_tg_name}"
+    "${var.service_base_name}-tag"                = local.ssn_k8s_nlb_step_ca_tg_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
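+# Port 32433 is presumably the NodePort where the step-certificates (step-ca)
+# service is exposed inside the cluster; the 443 listener below forwards
+# external TLS traffic to this target group.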
+
+resource "aws_lb_listener" "ssn_k8s_nlb_api_listener" {
+  load_balancer_arn = aws_lb.ssn_k8s_nlb.arn
+  port              = "6443"
+  protocol          = "TCP"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.ssn_k8s_nlb_api_target_group.arn
+  }
+}
+
+resource "aws_lb_listener" "ssn_k8s_nlb_step_ca_listener" {
+  load_balancer_arn = aws_lb.ssn_k8s_nlb.arn
+  port              = "443"
+  protocol          = "TCP"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.ssn_k8s_nlb_step_ca_target_group.arn
+  }
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/main.tf
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/aws/ssn-k8s/main/main.tf
index 16da950..56d5374 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/main.tf
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+provider "aws" {
+  region     = var.region
+  access_key = var.access_key_id
+  secret_key = var.secret_access_key
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/outputs.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/outputs.tf
new file mode 100644
index 0000000..0decf26
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/outputs.tf
@@ -0,0 +1,70 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+output "ssn_k8s_nlb_dns_name" {
+  value = aws_lb.ssn_k8s_nlb.dns_name
+}
+
+output "ssn_k8s_masters_ip_addresses" {
+  value = data.aws_instances.ssn_k8s_masters_instances.public_ips
+  depends_on = [data.aws_instances.ssn_k8s_masters_instances]
+}
+
+output "ssn_bucket_name" {
+  value = aws_s3_bucket.ssn_k8s_bucket.id
+}
+
+output "ssn_vpc_id" {
+  value = data.aws_vpc.ssn_k8s_vpc_data.id
+}
+
+output "ssn_subnet_id" {
+  #  value = compact([data.aws_subnet.k8s-subnet-a-data.id, data.aws_subnet.k8s-subnet-b-data.id, local.subnet_c_id])
+  value = data.aws_subnet.k8s-subnet-a-data.id
+}
+
+output "ssn_k8s_sg_id" {
+  value = aws_security_group.ssn_k8s_sg.id
+}
+
+output "region" {
+  value = var.region
+}
+
+output "service_base_name" {
+  value = var.service_base_name
+}
+
+output "env_os" {
+  value = var.env_os
+}
+
+output "ssn_k8s_masters_shape" {
+  value = var.ssn_k8s_masters_shape
+}
+
+output "zone" {
+  value = var.zone
+}
+
+output "tag_resource_id" {
+  value = var.tag_resource_id
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
new file mode 100644
index 0000000..e01b1d6
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
@@ -0,0 +1,57 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  ssn_policy_name = "${var.service_base_name}-ssn-policy"
+  ssn_role_name   = "${var.service_base_name}-ssn-role"
+}
+
+data "template_file" "ssn_k8s_s3_policy" {
+  template = file("./files/ssn-policy.json.tpl")
+}
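+# The policy file contains no interpolation placeholders, so template_file just
+# reads it verbatim (file() would behave the same); keeping it as a template
+# leaves room to inject variables later.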
+
+resource "aws_iam_policy" "ssn_k8s_policy" {
+  name        = local.ssn_policy_name
+  description = "Policy for SSN K8S"
+  policy      = data.template_file.ssn_k8s_s3_policy.rendered
+}
+
+resource "aws_iam_role" "ssn_k8s_role" {
+  name               = local.ssn_role_name
+  assume_role_policy = file("./files/assume-policy.json")
+  tags               = {
+    Name                                          = local.ssn_role_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_role_name}"
+    "${var.service_base_name}-tag"                = local.ssn_role_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_iam_role_policy_attachment" "ssn_k8s_policy_attachment" {
+  role       = aws_iam_role.ssn_k8s_role.name
+  policy_arn = aws_iam_policy.ssn_k8s_policy.arn
+}
+
+resource "aws_iam_instance_profile" "k8s-profile" {
+  name = "${var.service_base_name}-instance-profile"
+  role = aws_iam_role.ssn_k8s_role.name
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
similarity index 61%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
index 951fdd7..f91e6ca 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
@@ -19,22 +19,20 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+locals {
+  ssn_s3_name = "${var.service_base_name}-ssn-bucket"
+}
 
+resource "aws_s3_bucket" "ssn_k8s_bucket" {
+  bucket = local.ssn_s3_name
+  acl    = "private"
+  tags   = {
+    Name                                          = local.ssn_s3_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_s3_name}"
+    "${var.service_base_name}-tag"                = local.ssn_s3_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+  force_destroy = true
+}
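+# The bucket holds the kubeadm join command and certificate key uploaded by the
+# masters; force_destroy lets `terraform destroy` remove it even while those
+# objects are still present.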
 
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
new file mode 100644
index 0000000..b9f7fa8
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  ssn_sg_name = "${var.service_base_name}-ssn-sg"
+}
+
+resource "aws_security_group" "ssn_k8s_sg" {
+  name        = local.ssn_sg_name
+  description = "SG for SSN K8S cluster"
+  vpc_id      = data.aws_vpc.ssn_k8s_vpc_data.id
+
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = -1
+    cidr_blocks = [data.aws_vpc.ssn_k8s_vpc_data.cidr_block]
+  }
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = var.allowed_cidrs
+  }
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = -1
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Need to be changed in the future"
+  }
+
+  egress {
+    from_port   = 0
+    protocol    = -1
+    to_port     = 0
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = {
+    Name                                          = local.ssn_sg_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_sg_name}"
+    "${var.service_base_name}-tag"                = local.ssn_sg_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/variables.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/variables.tf
new file mode 100644
index 0000000..d2515b2
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/variables.tf
@@ -0,0 +1,108 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+// AWS info
+variable "access_key_id" {
+  default = ""
+}
+variable "secret_access_key" {
+  default = ""
+}
+variable "region" {
+  default = "us-west-2"
+}
+variable "zone" {
+  default = "a"
+}
+
+// Common
+variable "env_os" {
+  default = "debian"
+}
+variable "key_name" {
+  default = "BDCC-DSS-POC"
+}
+variable "allowed_cidrs" {
+  type = list
+  default = ["0.0.0.0/0"]
+}
+variable "os_user" {
+  default = "dlab-user"
+}
+
+variable "project_tag" {
+  default = ""
+}
+
+variable "additional_tag" {
+  default = "product:dlab"
+}
+
+variable "tag_resource_id" {
+  default = "user:tag"
+}
+
+// SSN
+variable "service_base_name" {
+  default = "dlab-k8s"
+}
+variable "vpc_id" {
+  default = ""
+}
+variable "vpc_cidr" {
+  default = "172.31.0.0/16"
+}
+variable "subnet_id_a" {
+  default = ""
+}
+variable "subnet_id_b" {
+  default = ""
+}
+variable "subnet_cidr_a" {
+  default = "172.31.0.0/24"
+}
+variable "subnet_cidr_b" {
+  default = "172.31.1.0/24"
+}
+variable "subnet_cidr_c" {
+  default = "172.31.2.0/24"
+}
+variable "ami" {
+  default = "ami-07b4f3c02c7f83d59"
+}
+variable "ssn_k8s_masters_count" {
+  default = 3
+}
+variable "ssn_k8s_workers_count" {
+  default = 2
+}
+variable "ssn_root_volume_size" {
+  default = 30
+}
+variable "ssn_k8s_masters_shape" {
+  default = "t2.medium"
+}
+variable "ssn_k8s_workers_shape" {
+  default = "t2.medium"
+}
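+# Pinned in apt package format (<kubernetes version>-<package revision>), as
+# consumed by the `apt-get install kubelet=${kubernetes_version} ...` lines in
+# the user-data scripts.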
+variable "kubernetes_version" {
+  default = "1.15.5-00"
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
new file mode 100644
index 0000000..699dfcd
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
@@ -0,0 +1,160 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  additional_tag    = split(":", var.additional_tag)
+  ssn_vpc_name      = "${var.service_base_name}-vpc"
+  ssn_igw_name      = "${var.service_base_name}-igw"
+  ssn_subnet_a_name = "${var.service_base_name}-ssn-subnet-az-a"
+  ssn_subnet_b_name = "${var.service_base_name}-ssn-subnet-az-b"
+  ssn_subnet_c_name = "${var.service_base_name}-ssn-subnet-az-c"
+  endpoint_rt_name  = "${var.service_base_name}-endpoint-rt"
+  endpoint_s3_name  = "${var.service_base_name}-endpoint-s3"
+}
+
+resource "aws_vpc" "ssn_k8s_vpc" {
+  count                = var.vpc_id == "" ? 1 : 0
+  cidr_block           = var.vpc_cidr
+  instance_tenancy     = "default"
+  enable_dns_hostnames = true
+  enable_dns_support   = true
+
+  tags = {
+    Name                                          = local.ssn_vpc_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_vpc_name}"
+    "${var.service_base_name}-tag"                = local.ssn_vpc_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_internet_gateway" "ssn_k8s_igw" {
+  count  = var.vpc_id == "" ? 1 : 0
+  vpc_id = aws_vpc.ssn_k8s_vpc.0.id
+
+  tags = {
+    Name                                          = local.ssn_igw_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_igw_name}"
+    "${var.service_base_name}-tag"                = local.ssn_igw_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_route" "ssn_k8s_route" {
+  count                     = var.vpc_id == "" ? 1 : 0
+  route_table_id            = aws_vpc.ssn_k8s_vpc.0.main_route_table_id
+  destination_cidr_block    = "0.0.0.0/0"
+  gateway_id                = aws_internet_gateway.ssn_k8s_igw.0.id
+}
+
+data "aws_vpc" "ssn_k8s_vpc_data" {
+  id = var.vpc_id == "" ? aws_vpc.ssn_k8s_vpc.0.id : var.vpc_id
+}
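+# Create-or-reuse pattern: the VPC, IGW and subnets are created only when the
+# corresponding ID variable is empty (count = ... ? 1 : 0), and each data
+# source resolves to either the freshly created resource or the one supplied
+# by the caller.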
+
+resource "aws_subnet" "ssn_k8s_subnet_a" {
+  count                   = var.subnet_id_a == "" ? 1 : 0
+  vpc_id                  = data.aws_vpc.ssn_k8s_vpc_data.id
+  availability_zone       = "${var.region}a"
+  cidr_block              = var.subnet_cidr_a
+  map_public_ip_on_launch = true
+
+  tags = {
+    Name                                          = local.ssn_subnet_a_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_subnet_a_name}"
+    "${var.service_base_name}-tag"                = local.ssn_subnet_a_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_subnet" "ssn_k8s_subnet_b" {
+  count                   = var.subnet_id_b == "" ? 1 : 0
+  vpc_id                  = data.aws_vpc.ssn_k8s_vpc_data.id
+  availability_zone       = "${var.region}b"
+  cidr_block              = var.subnet_cidr_b
+  map_public_ip_on_launch = true
+
+  tags = {
+    Name                                          = local.ssn_subnet_b_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_subnet_b_name}"
+    "${var.service_base_name}-tag"                = local.ssn_subnet_b_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_subnet" "ssn_k8s_subnet_c" {
+  count                   = var.ssn_k8s_masters_count > 2 ? 1 : 0
+  vpc_id                  = data.aws_vpc.ssn_k8s_vpc_data.id
+  availability_zone       = "${var.region}c"
+  cidr_block              = var.subnet_cidr_c
+  map_public_ip_on_launch = true
+
+  tags = {
+    Name                                          = local.ssn_subnet_c_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_subnet_c_name}"
+    "${var.service_base_name}-tag"                = local.ssn_subnet_c_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+data "aws_subnet" "k8s-subnet-a-data" {
+  id = var.subnet_id_a == "" ? aws_subnet.ssn_k8s_subnet_a.0.id : var.subnet_id_a
+}
+
+data "aws_subnet" "k8s-subnet-b-data" {
+  id = var.subnet_id_b == "" ? aws_subnet.ssn_k8s_subnet_b.0.id : var.subnet_id_b
+}
+
+data "aws_subnet" "k8s-subnet-c-data" {
+  count = var.ssn_k8s_masters_count > 2 ? 1 : 0
+  id    = aws_subnet.ssn_k8s_subnet_c.0.id
+}
+
+resource "aws_route_table" "ssn-k8s-users-route-table" {
+  vpc_id = data.aws_vpc.ssn_k8s_vpc_data.id
+  tags = {
+    Name                                          = local.endpoint_rt_name
+    "${var.service_base_name}-tag"                = var.service_base_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.endpoint_rt_name}"
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_vpc_endpoint" "ssn-k8s-users-s3-endpoint" {
+  vpc_id       = data.aws_vpc.ssn_k8s_vpc_data.id
+  service_name = "com.amazonaws.${var.region}.s3"
+  tags = {
+    Name                                          = local.endpoint_s3_name
+    "${local.additional_tag[0]}"                  = local.additional_tag[1]
+    "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.endpoint_s3_name}"
+    "${var.service_base_name}-tag"                = local.endpoint_s3_name
+    "kubernetes.io/cluster/${local.cluster_name}" = "owned"
+  }
+}
+
+resource "aws_vpc_endpoint_route_table_association" "ssn-k8s-users-s3-route" {
+  route_table_id  = aws_route_table.ssn-k8s-users-route-table.id
+  vpc_endpoint_id = aws_vpc_endpoint.ssn-k8s-users-s3-endpoint.id
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf b/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
new file mode 100644
index 0000000..d76a16c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
@@ -0,0 +1,167 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  cluster_name  = "${var.sbn}-de-${var.notebook_name}-${var.cluster_name}"
+  notebook_name = "${var.sbn}-nb-${var.notebook_name}"
+  nic           = "${var.sbn}-de-${var.notebook_name}-${var.cluster_name}-nic"
+}
+
+resource "azurerm_network_interface" "master-nic" {
+    name                      = "${local.nic}-m"
+    location                  = var.region
+    resource_group_name       = var.resource_group
+    network_security_group_id = var.nb-sg_id
+
+    ip_configuration {
+        name                          = "${local.nic}-m-IPconfigurations"
+        subnet_id                     = var.subnet_id
+        private_ip_address_allocation = "Dynamic"
+    }
+
+    tags = {
+        Name             = "${local.nic}-m"
+        Project_name     = var.project_name
+        Project_tag      = var.project_tag
+        Endpoint_Tag     = var.endpoint_tag
+        Product          = var.product
+        SBN              = var.sbn
+        User_Tag         = var.user_tag
+        Custom_Tag       = var.custom_tag
+    }
+}
+
+resource "azurerm_virtual_machine" "master" {
+    name                  = "${local.cluster_name}-m"
+    location              = var.region
+    resource_group_name   = var.resource_group
+    network_interface_ids = ["${azurerm_network_interface.master-nic.id}"]
+    vm_size               = var.master_shape
+
+    storage_os_disk {
+        name              = "${local.cluster_name}-m-volume-primary"
+        caching           = "ReadWrite"
+        create_option     = "FromImage"
+        managed_disk_type = "Premium_LRS"
+    }
+
+    storage_image_reference {
+        id = var.ami
+    }
+
+    os_profile {
+        computer_name  = "${local.cluster_name}-m"
+        admin_username = var.initial_user
+    }
+
+    os_profile_linux_config {
+        disable_password_authentication = true
+        ssh_keys {
+            path     = "/home/${var.initial_user}/.ssh/authorized_keys"
+            key_data = "${file("${var.ssh_key}")}"
+        }
+    }
+
+    tags = {
+        Name                     = "${local.cluster_name}-m"
+        Type                     = "master"
+        dataengine_notebook_name = local.notebook_name
+        Product                  = var.product
+        Project_name             = var.project_name
+        Project_tag              = var.project_tag
+        User_tag                 = var.user_tag
+        Endpoint_Tag             = var.endpoint_tag
+        SBN                      = var.sbn
+        Custom_Tag               = var.custom_tag
+    }
+}
+
+
+resource "azurerm_network_interface" "slave-nic" {
+    count                     = var.slave_count
+    name                      = "${local.nic}-s-${count.index + 1}"
+    location                  = var.region
+    resource_group_name       = var.resource_group
+    network_security_group_id = var.nb-sg_id
+
+    ip_configuration {
+        name                          = "${local.nic}-s-${count.index + 1}-IPconfigurations"
+        subnet_id                     = var.subnet_id
+        private_ip_address_allocation = "Dynamic"
+    }
+
+    tags = {
+        Name             = "${local.cluster_name}-s-${count.index + 1}"
+        Project_name     = var.project_name
+        Project_tag      = var.project_tag
+        Endpoint_Tag     = var.endpoint_tag
+        SBN              = var.sbn
+        Product          = var.product
+        User_Tag         = var.user_tag
+        Custom_Tag       = var.custom_tag
+    }
+}
+
+resource "azurerm_virtual_machine" "slave" {
+    count                 = var.slave_count
+    name                  = "${local.cluster_name}-s-${count.index + 1}"
+    location              = var.region
+    resource_group_name   = var.resource_group
+    network_interface_ids = ["${azurerm_network_interface.slave-nic[count.index].id}"]
+    vm_size               = var.slave_shape
+
+    storage_os_disk {
+        name              = "${local.notebook_name}-s-${count.index + 1}-volume-primary"
+        caching           = "ReadWrite"
+        create_option     = "FromImage"
+        managed_disk_type = "Premium_LRS"
+    }
+
+    storage_image_reference {
+        id = var.ami
+    }
+
+    os_profile {
+        computer_name  = "${local.cluster_name}-s-${count.index + 1}"
+        admin_username = var.initial_user
+    }
+
+    os_profile_linux_config {
+        disable_password_authentication = true
+        ssh_keys {
+            path     = "/home/${var.initial_user}/.ssh/authorized_keys"
+            key_data = "${file("${var.ssh_key}")}"
+        }
+    }
+
+    tags = {
+        Name                     = "${local.cluster_name}-s-${count.index + 1}"
+        Type                     = "slave"
+        dataengine_notebook_name = local.notebook_name
+        Product                  = var.product
+        Project_name             = var.project_name
+        Project_tag              = var.project_tag
+        User_tag                 = var.user_tag
+        Endpoint_Tag             = var.endpoint_tag
+        SBN                      = var.sbn
+        Custom_Tag               = var.custom_tag
+    }
+}
diff --git a/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf b/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
new file mode 100644
index 0000000..70e1db5
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
@@ -0,0 +1,143 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  node_name = "${var.sbn}-nb-${var.notebook_name}"
+  nic       = "${var.sbn}-nb-${var.notebook_name}-nic"
+}
+
+resource "azurerm_network_interface" "nic" {
+    name                      = local.nic
+    location                  = var.region
+    resource_group_name       = var.resource_group
+    network_security_group_id = var.nb-sg_id
+
+    ip_configuration {
+        name                          = "${local.nic}-IPconfigurations"
+        subnet_id                     = var.subnet_id
+        private_ip_address_allocation = "Dynamic"
+    }
+
+    tags = {
+        Exploratory      = var.notebook_name
+        SBN              = var.sbn
+        Name             = local.node_name
+        Project_name     = var.project_name
+        Project_tag      = var.project_tag
+        Endpoint_Tag     = var.endpoint_tag
+        Product          = var.product
+        User_Tag         = var.user_tag
+        Custom_Tag       = var.custom_tag
+    }
+}
+
+resource "azurerm_virtual_machine" "instance" {
+    count                 = var.custom_ami == true ? 0 : 1
+    name                  = local.node_name
+    location              = var.region
+    resource_group_name   = var.resource_group
+    network_interface_ids = ["${azurerm_network_interface.nic.id}"]
+    vm_size               = var.instance_type
+
+    storage_os_disk {
+        name              = "${local.node_name}-volume-primary"
+        caching           = "ReadWrite"
+        create_option     = "FromImage"
+        managed_disk_type = "Premium_LRS"
+    }
+
+    storage_image_reference {
+        publisher = var.ami_publisher[var.os_env]
+        offer     = var.ami_offer[var.os_env]
+        sku       = var.ami_sku[var.os_env]
+        version   = var.ami_version[var.os_env]
+    }
+
+    os_profile {
+        computer_name  = local.node_name
+        admin_username = var.initial_user
+    }
+
+    os_profile_linux_config {
+        disable_password_authentication = true
+        ssh_keys {
+            path     = "/home/${var.initial_user}/.ssh/authorized_keys"
+            key_data = "${file("${var.ssh_key}")}"
+        }
+    }
+
+    tags = {
+        Exploratory      = var.notebook_name
+        SBN              = var.sbn
+        Name             = local.node_name
+        Project_name     = var.project_name
+        Project_tag      = var.project_tag
+        Endpoint_Tag     = var.endpoint_tag
+        Product          = var.product
+        User_Tag         = var.user_tag
+        Custom_Tag       = var.custom_tag
+    }
+}
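+# "instance" and "instance_custom" are mutually exclusive via their count on
+# var.custom_ami: the former boots from a marketplace image reference
+# (publisher/offer/sku), the latter from a custom image ID.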
+
+resource "azurerm_virtual_machine" "instance_custom" {
+    count                 = var.custom_ami == true ? 1 : 0
+    name                  = local.node_name
+    location              = var.region
+    resource_group_name   = var.resource_group
+    network_interface_ids = ["${azurerm_network_interface.nic.id}"]
+    vm_size               = var.instance_type
+
+    storage_os_disk {
+        name              = "${local.node_name}-volume-primary"
+        caching           = "ReadWrite"
+        create_option     = "FromImage"
+        managed_disk_type = "Premium_LRS"
+    }
+
+    storage_image_reference {
+        id = var.ami
+    }
+
+    os_profile {
+        computer_name  = local.node_name
+        admin_username = var.initial_user
+    }
+
+    os_profile_linux_config {
+        disable_password_authentication = true
+        ssh_keys {
+            path     = "/home/${var.initial_user}/.ssh/authorized_keys"
+            key_data = "${file("${var.ssh_key}")}"
+        }
+    }
+
+    tags = {
+        Exploratory      = var.notebook_name
+        SBN              = var.sbn
+        Name             = local.node_name
+        Project_name     = var.project_name
+        Project_tag      = var.project_tag
+        Endpoint_Tag     = var.endpoint_tag
+        Product          = var.product
+        User_Tag         = var.user_tag
+        Custom_Tag       = var.custom_tag
+    }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf
new file mode 100644
index 0000000..a44a37f
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf
@@ -0,0 +1,53 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  shared_bucket_name = lower("${var.service_base_name}-${var.endpoint_id}-shared-bucket")
+}
+
+resource "random_string" "shared_bucket_service_name" {
+  length  = 10
+  special = false
+  lower   = true
+  upper   = false
+}
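+# Azure storage account names must be globally unique and limited to 3-24
+# lowercase letters and digits, so a random name is generated for the account
+# itself while the human-readable bucket name goes into the tags and the
+# container below.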
+
+resource "azurerm_storage_account" "shared-endpoint-storage-account" {
+  name                     = random_string.shared_bucket_service_name.result
+  resource_group_name      = data.azurerm_resource_group.data-endpoint-resource-group.name
+  location                 = data.azurerm_resource_group.data-endpoint-resource-group.location
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  account_kind             = "BlobStorage"
+
+  tags = {
+    Name                              = local.shared_bucket_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.service_base_name}-tag"    = local.shared_bucket_name
+    "endpoint_tag"                    = var.endpoint_id
+  }
+}
+
+resource "azurerm_storage_container" "shared-endpoint-storage-container" {
+  name                  = local.shared_bucket_name
+  storage_account_name  = azurerm_storage_account.shared-endpoint-storage-account.name
+  container_access_type = "private"
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
new file mode 100644
index 0000000..82c1497
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
@@ -0,0 +1,70 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_instance_name      = "${var.service_base_name}-${var.endpoint_id}-endpoint"
+  endpoint_instance_disk_name = "${var.service_base_name}-${var.endpoint_id}-endpoint-volume"
+}
+
+data "tls_public_key" "enpoint_key" {
+  private_key_pem = file(var.key_path)
+}
+
+resource "azurerm_virtual_machine" "endpoint_instance" {
+  name                          = local.endpoint_instance_name
+  location                      = data.azurerm_resource_group.data-endpoint-resource-group.location
+  resource_group_name           = data.azurerm_resource_group.data-endpoint-resource-group.name
+  network_interface_ids         = [azurerm_network_interface.endpoint-nif.id]
+  vm_size                       = var.endpoint_shape
+  delete_os_disk_on_termination = true
+
+  storage_image_reference {
+    publisher = element(split("_", var.ami),0)
+    offer     = element(split("_", var.ami),1)
+    sku       = element(split("_", var.ami),2)
+    version   = "latest"
+  }
+  storage_os_disk {
+    os_type = "Linux"
+    name              = local.endpoint_instance_disk_name
+    create_option     = "FromImage"
+    disk_size_gb      = var.endpoint_volume_size
+    managed_disk_type = "Premium_LRS"
+  }
+  os_profile {
+    computer_name  = local.endpoint_instance_name
+    admin_username = "ubuntu"
+  }
+  os_profile_linux_config {
+    disable_password_authentication = true
+    ssh_keys {
+      key_data = data.tls_public_key.endpoint_key.public_key_openssh
+      path = "/home/ubuntu/.ssh/authorized_keys"
+    }
+  }
+
+  tags = {
+    Name                              = local.endpoint_instance_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_instance_name}"
+    "${var.service_base_name}-tag"    = local.endpoint_instance_name
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
new file mode 100644
index 0000000..cbf2187
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
@@ -0,0 +1,47 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  resource_group_name = "${var.service_base_name}-${var.endpoint_id}-resource-group"
+  json_data           = jsondecode(file(var.auth_file_path))
+}
+
+provider "azurerm" {
+  features {}
+  subscription_id = local.json_data.subscriptionId
+  client_id       = local.json_data.clientId
+  client_secret   = local.json_data.clientSecret
+  tenant_id       = local.json_data.tenantId
+}
+
+resource "azurerm_resource_group" "endpoint-resource-group" {
+  count    = var.resource_group_name == "" ? 1 : 0
+  name     = local.resource_group_name
+  location = var.region
+
+  tags = {
+    Name = var.service_base_name
+  }
+}
+
+data "azurerm_resource_group" "data-endpoint-resource-group" {
+  name = var.resource_group_name == "" ? azurerm_resource_group.endpoint-resource-group.0.name : var.resource_group_name
+}
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
new file mode 100644
index 0000000..738f062
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
@@ -0,0 +1,103 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_subnet_name       = "${var.service_base_name}-${var.endpoint_id}-subnet"
+  endpoint_vpc_name          = "${var.service_base_name}-${var.endpoint_id}-vpc"
+  additional_tag             = split(":", var.additional_tag)
+  endpoint_ip_name           = "${var.service_base_name}-${var.endpoint_id}-static-ip"
+  endpoint_nif_name          = "${var.service_base_name}-${var.endpoint_id}-nif"
+}
+
+resource "azurerm_virtual_network" "endpoint-network" {
+  count               = var.vpc_id == "" ? 1 : 0
+  name                = local.endpoint_vpc_name
+  location            = data.azurerm_resource_group.data-endpoint-resource-group.location
+  resource_group_name = data.azurerm_resource_group.data-endpoint-resource-group.name
+  address_space       = [var.vpc_cidr]
+
+  tags = {
+    Name                              = local.endpoint_vpc_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_vpc_name}"
+    "${var.service_base_name}-tag"    = local.endpoint_vpc_name
+  }
+}
+
+data "azurerm_virtual_network" "data-endpoint-network" {
+  name                = var.vpc_id == "" ? azurerm_virtual_network.endpoint-network.0.name : var.vpc_id
+  resource_group_name = data.azurerm_resource_group.data-endpoint-resource-group.name
+}
+
+resource "azurerm_subnet" "endpoint-subnet" {
+  count                = var.subnet_id == "" ? 1 : 0
+  name                 = local.endpoint_subnet_name
+  resource_group_name  = data.azurerm_resource_group.data-endpoint-resource-group.name
+  virtual_network_name = data.azurerm_virtual_network.data-endpoint-network.name
+  address_prefix       = var.subnet_cidr
+}
+
+data "azurerm_subnet" "data-endpoint-subnet" {
+  name                 = var.subnet_id == "" ? azurerm_subnet.endpoint-subnet.0.name : var.subnet_id
+  virtual_network_name = data.azurerm_virtual_network.data-endpoint-network.name
+  resource_group_name  = data.azurerm_resource_group.data-endpoint-resource-group.name
+}
+
+resource "azurerm_public_ip" "endpoint-static-ip" {
+  name                = local.endpoint_ip_name
+  location            = var.region
+  resource_group_name = data.azurerm_resource_group.data-endpoint-resource-group.name
+  allocation_method   = "Static"
+
+  tags = {
+    Name                              = local.endpoint_ip_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_ip_name}"
+    "${var.service_base_name}-tag"    = local.endpoint_ip_name
+  }
+}
+
+resource "azurerm_network_interface" "endpoint-nif" {
+  name                      = local.endpoint_nif_name
+  location                  = data.azurerm_resource_group.data-endpoint-resource-group.location
+  resource_group_name       = data.azurerm_resource_group.data-endpoint-resource-group.name
+
+  ip_configuration {
+    name                          = "configuration"
+    subnet_id                     = data.azurerm_subnet.data-endpoint-subnet.id
+    private_ip_address_allocation = "Dynamic"
+    public_ip_address_id          = azurerm_public_ip.endpoint-static-ip.id
+    private_ip_address_version    = "IPv4"
+  }
+
+  tags = {
+    Name                              = local.endpoint_nif_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_nif_name}"
+    "${var.service_base_name}-tag"    = local.endpoint_nif_name
+  }
+}
+
+resource "azurerm_network_interface_security_group_association" "endpoint-nif-sg" {
+  network_interface_id      = azurerm_network_interface.endpoint-nif.id
+  network_security_group_id = azurerm_network_security_group.endpoint-sg.id
+  depends_on = [azurerm_virtual_machine.endpoint_instance]
+}
\ No newline at end of file
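`network.tf` follows a create-or-reuse pattern: each resource carries `count = var.<id> == "" ? 1 : 0`, and a paired data source resolves to either the freshly created resource or a pre-existing one. A sketch of how a caller selects between the two modes (variable names are taken from this file; the CIDR values are placeholders):

```python
# Empty string -> Terraform creates the VPC/subnet; a non-empty ID -> the data
# source looks up the existing one and no resource is created.
def network_var_flags(vpc_id="", subnet_id="",
                      vpc_cidr="172.31.0.0/16", subnet_cidr="172.31.0.0/24"):
    return [
        "-var=vpc_id={}".format(vpc_id),
        "-var=subnet_id={}".format(subnet_id),
        "-var=vpc_cidr={}".format(vpc_cidr),
        "-var=subnet_cidr={}".format(subnet_cidr),
    ]

# subprocess.run(["terraform", "apply"] + network_var_flags(vpc_id="my-vpc"))
```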
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
similarity index 65%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
index 951fdd7..c005b29 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
@@ -19,22 +19,26 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+output "endpoint_eip_address" {
+  value = azurerm_public_ip.endpoint-static-ip.ip_address
+}
 
+output "subnet_id" {
+  value = data.azurerm_subnet.data-endpoint-subnet.name
+}
 
-USER root
+output "vpc_id" {
+  value = data.azurerm_virtual_network.data-endpoint-network.name
+}
 
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
+output "ssn_k8s_sg_id" {
+  value = azurerm_network_security_group.endpoint-sg.name
+}
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
+output "endpoint_id" {
+  value = var.endpoint_id
+}
 
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+output "resource_group_name" {
+  value = data.azurerm_resource_group.data-endpoint-resource-group.name
+}
\ No newline at end of file
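The outputs above (note that `subnet_id`, `vpc_id` and `ssn_k8s_sg_id` actually export resource *names*) are what the deployment tooling consumes after `terraform apply`. A sketch of reading them programmatically, assuming the module directory as the working directory:

```python
import json
import subprocess

def read_outputs(workdir):
    """Return {output_name: value} using `terraform output -json`."""
    raw = subprocess.check_output(["terraform", "output", "-json"], cwd=workdir)
    return {name: meta["value"] for name, meta in json.loads(raw).items()}

# outputs = read_outputs("infrastructure-provisioning/terraform/azure/endpoint/main")
# print(outputs["endpoint_eip_address"])
```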
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/sg.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/sg.tf
new file mode 100644
index 0000000..08984f3
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/sg.tf
@@ -0,0 +1,86 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_sg_name = "${var.service_base_name}-${var.endpoint_id}-sg"
+}
+
+resource "azurerm_network_security_group" "endpoint-sg" {
+  location            = data.azurerm_resource_group.data-endpoint-resource-group.location
+  resource_group_name = data.azurerm_resource_group.data-endpoint-resource-group.name
+  name                = local.endpoint_sg_name
+}
+
+resource "azurerm_network_security_rule" "inbound-1" {
+  resource_group_name         = data.azurerm_resource_group.data-endpoint-resource-group.name
+  network_security_group_name = azurerm_network_security_group.endpoint-sg.name
+  name                        = "inbound-1"
+  direction                   = "Inbound"
+  access                      = "Allow"
+  priority                    = 100
+  source_address_prefix       = "*"
+  source_port_range           = "*"
+  destination_address_prefix  = "*"
+  destination_port_range      = "22"
+  protocol                    = "TCP"
+}
+
+resource "azurerm_network_security_rule" "inbound-2" {
+  resource_group_name         = data.azurerm_resource_group.data-endpoint-resource-group.name
+  network_security_group_name = azurerm_network_security_group.endpoint-sg.name
+  name                        = "inbound-2"
+  direction                   = "Inbound"
+  access                      = "Allow"
+  priority                    = 200
+  source_address_prefix       = "*"
+  source_port_range           = "*"
+  destination_address_prefix  = "*"
+  destination_port_range      = "8084"
+  protocol                    = "TCP"
+}
+
+resource "azurerm_network_security_rule" "inbound-3" {
+  resource_group_name         = data.azurerm_resource_group.data-endpoint-resource-group.name
+  network_security_group_name = azurerm_network_security_group.endpoint-sg.name
+  name                        = "inbound-3"
+  direction                   = "Inbound"
+  access                      = "Allow"
+  priority                    = 300
+  source_address_prefix       = "*"
+  source_port_range           = "*"
+  destination_address_prefix  = "*"
+  destination_port_range      = "8088"
+  protocol                    = "TCP"
+}
+
+resource "azurerm_network_security_rule" "outbound-1" {
+  resource_group_name         = data.azurerm_resource_group.data-endpoint-resource-group.name
+  network_security_group_name = azurerm_network_security_group.endpoint-sg.name
+  name                        = "outbound-1"
+  direction                   = "Outbound"
+  access                      = "Allow"
+  priority                    = 100
+  source_address_prefix       = "*"
+  source_port_range           = "*"
+  destination_address_prefix  = "*"
+  destination_port_range      = "*"
+  protocol                    = "*"
+}
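The security group admits TCP 22, 8084 and 8088 from any source (8088 matches the billing service port configured in the YAML templates below) and allows all outbound traffic. A small post-provisioning smoke test, assuming the public IP from the `endpoint_eip_address` output:

```python
import socket

ENDPOINT_PORTS = (22, 8084, 8088)  # the three inbound rules in sg.tf

def unreachable_ports(host, timeout=5):
    """Return the subset of endpoint ports that refuse or time out."""
    failed = []
    for port in ENDPOINT_PORTS:
        try:
            with socket.create_connection((host, port), timeout=timeout):
                pass
        except OSError:
            failed.append(port)
    return failed
```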
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/variables.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/variables.tf
new file mode 100644
index 0000000..abc7e97
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/variables.tf
@@ -0,0 +1,82 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "auth_file_path" {}
+
+variable "resource_group_name" {
+  default = ""
+}
+
+variable "region" {
+  default = "West US 2"
+}
+
+variable "service_base_name" {}
+
+variable "endpoint_id" {}
+
+variable "additional_tag" {
+  default = "product:dlab"
+}
+
+variable "vpc_cidr" {}
+
+variable "tag_resource_id" {
+  default = "user:tag"
+}
+
+variable "vpc_id" {
+  default = ""
+}
+
+variable "subnet_id" {
+  default = ""
+}
+
+variable "subnet_cidr" {}
+
+variable "endpoint_shape" {}
+
+variable "ami" {
+  default = "Canonical_UbuntuServer_16.04-LTS"
+}
+
+variable "endpoint_volume_size" {}
+
+variable "key_path" {}
+
+variable "authentication_file" {
+  default = ""
+}
+
+variable "offer_number" {}
+
+variable "currency" {}
+
+variable "locale" {}
+
+variable "region_info" {}
+
+variable "mongo_password" {}
+
+variable "mongo_host" {}
+
+variable "billing_enable" {}
\ No newline at end of file
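Every variable above without a `default` must be supplied by the caller. A sketch of a `terraform.tfvars.json` generator covering exactly those variables (all values are placeholders for illustration, not recommendations):

```python
import json

required_vars = {
    "auth_file_path": "/path/to/azure_auth.json",
    "service_base_name": "dlab",
    "endpoint_id": "endpoint1",
    "vpc_cidr": "172.31.0.0/16",
    "subnet_cidr": "172.31.0.0/24",
    "endpoint_shape": "Standard_DS2_v2",
    "endpoint_volume_size": "32",
    "key_path": "/path/to/key.pub",
    "offer_number": "CHANGE_ME",
    "currency": "USD",
    "locale": "en-US",
    "region_info": "US",
    "mongo_password": "CHANGE_ME",
    "mongo_host": "localhost",
    "billing_enable": "false",
}

with open("terraform.tfvars.json", "w") as f:
    json.dump(required_vars, f, indent=2)
```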
diff --git a/infrastructure-provisioning/terraform/azure/project/main/instance.tf b/infrastructure-provisioning/terraform/azure/project/main/instance.tf
new file mode 100644
index 0000000..34cd26b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/project/main/instance.tf
@@ -0,0 +1,98 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  node_name = "${var.service_base_name}-${var.project_tag}-edge"
+  nic       = "${var.service_base_name}-${var.project_tag}-edge-nic"
+}
+
+resource "azurerm_network_interface" "nic" {
+    name                      = local.nic
+    location                  = var.region
+    resource_group_name       = var.resource_group
+    network_security_group_id = azurerm_network_security_group.edge_sg.id
+
+    ip_configuration {
+        name                          = "${local.nic}-IPconfigurations"
+        subnet_id                     = var.subnet_id
+        #private_ip_address_allocation = "Dynamic"
+        private_ip_address_allocation = "Static"
+        private_ip_address            = var.edge_private_ip
+        public_ip_address_id          = azurerm_public_ip.edge_ip.id
+    }
+
+    tags = {
+        SBN              = var.service_base_name
+        Name             = local.node_name
+        Project_name     = var.project_name
+        Project_tag      = var.project_tag
+        Endpoint_Tag     = var.endpoint_tag
+        Product          = var.product
+        User_Tag         = var.user_tag
+        Custom_Tag       = var.custom_tag
+    }
+}
+
+resource "azurerm_virtual_machine" "instance" {
+    name                  = local.node_name
+    location              = var.region
+    resource_group_name   = var.resource_group
+    network_interface_ids = [azurerm_network_interface.nic.id]
+    vm_size               = var.instance_type
+
+    storage_os_disk {
+        name              = "${local.node_name}-volume-primary"
+        caching           = "ReadWrite"
+        create_option     = "FromImage"
+        managed_disk_type = "Premium_LRS"
+    }
+
+    storage_image_reference {
+        publisher = var.ami_publisher[var.os_env]
+        offer     = var.ami_offer[var.os_env]
+        sku       = var.ami_sku[var.os_env]
+        version   = var.ami_version[var.os_env]
+    }
+
+    os_profile {
+        computer_name  = local.node_name
+        admin_username = var.initial_user
+    }
+
+    os_profile_linux_config {
+        disable_password_authentication = true
+        ssh_keys {
+            path     = "/home/${var.initial_user}/.ssh/authorized_keys"
+            key_data = file(var.ssh_key)
+        }
+    }
+
+    tags = {
+        SBN              = var.service_base_name
+        Name             = local.node_name
+        Project_name     = var.project_name
+        Project_tag      = var.project_tag
+        Endpoint_Tag     = var.endpoint_tag
+        Product          = var.product
+        User_Tag         = var.user_tag
+        Custom_Tag       = var.custom_tag
+    }
+}
\ No newline at end of file
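`instance.tf` injects the public key with `file(var.ssh_key)`, which aborts at plan time if the path is wrong. A hypothetical preflight check that could run before `terraform plan`:

```python
import os
import sys

def preflight_ssh_key(ssh_key_path):
    """Verify the path exists and looks like an OpenSSH public key."""
    if not os.path.isfile(ssh_key_path):
        sys.exit("SSH public key not found: {}".format(ssh_key_path))
    with open(ssh_key_path) as f:
        key_data = f.read().strip()
    if not key_data.startswith(("ssh-rsa", "ssh-ed25519", "ecdsa-")):
        sys.exit("{} does not look like an OpenSSH public key".format(ssh_key_path))
    return key_data
```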
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/bin/deploy/__init__.py
similarity index 82%
rename from integration-tests-cucumber/src/test/resources/config.properties
rename to infrastructure-provisioning/terraform/bin/deploy/__init__.py
index d0cfc24..b639f64 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/bin/deploy/__init__.py
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,4 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
diff --git a/infrastructure-provisioning/terraform/bin/deploy/billing_app_aws.yml b/infrastructure-provisioning/terraform/bin/deploy/billing_app_aws.yml
new file mode 100644
index 0000000..dd33a9e
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/billing_app_aws.yml
@@ -0,0 +1,55 @@
+# *****************************************************************************
+#
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+#
+# ******************************************************************************
+
+spring:
+  main:
+    allow-bean-definition-overriding: true
+  data:
+    mongodb:
+      username: admin
+      password: MONGO_PASSWORD
+      database: dlabdb
+      port: 27017
+      host: MONGO_HOST
+
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
+
+server.ssl.key-store-type: JKS
+server.ssl.key-store: /home/OS_USER/keys/endpoint.keystore.jks
+server.ssl.key-store-password: KEY_STORE_PASSWORD
+server.ssl.key-alias: endpoint
+
+logging:
+  file: /var/opt/dlab/log/ssn/billing.log
+  level:
+    com:
+      epam: trace
+
+keycloak:
+  bearer-only: true
+  realm: dlab
+  resource: KEYCLOAK_CLIENT_ID
+  credentials.secret: CLIENT_SECRET
+  ssl-required: none
+  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
\ No newline at end of file
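The ALL-CAPS tokens in this template (MONGO_PASSWORD, MONGO_HOST, OS_USER, KEY_STORE_PASSWORD, KEYCLOAK_CLIENT_ID, CLIENT_SECRET, KEYCLOAK_AUTH_SERVER_URL) are substituted at deployment time, as `endpoint_fab.py` below does for `provisioning.yml`. A quick lint that no placeholder survived rendering:

```python
PLACEHOLDERS = ("MONGO_PASSWORD", "MONGO_HOST", "OS_USER", "KEY_STORE_PASSWORD",
                "KEYCLOAK_CLIENT_ID", "CLIENT_SECRET", "KEYCLOAK_AUTH_SERVER_URL")

def unresolved_placeholders(rendered_path):
    """Return any template token still present after substitution."""
    with open(rendered_path) as f:
        text = f.read()
    return [token for token in PLACEHOLDERS if token in text]
```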
diff --git a/infrastructure-provisioning/terraform/bin/deploy/billing_aws.yml b/infrastructure-provisioning/terraform/bin/deploy/billing_aws.yml
new file mode 100644
index 0000000..41add93
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/billing_aws.yml
@@ -0,0 +1,94 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+# Specify the parameters enclosed in angle brackets.
+# Run the following command for help with configuration details:
+# java -jar billing-1.0.jar --help conf
+# java -jar billing-1.0.jar --help {adapter | parser | filter | logappender} [name]
+
+billingEnabled: true
+
+host: MONGO_HOST
+port: 27017
+username: admin
+password: MONGO_PASSWORD
+database: dlabdb
+
+# Adapter for reading source data. Known types: file, s3file
+adapterIn:
+  - type: s3file
+    bucket: BILLING_BUCKET_NAME
+    path: REPORT_PATH
+    awsJobEnabled: AWS_JOB_ENABLED
+    accountId: ACCOUNT_ID
+    accessKeyId: ACCESS_KEY_ID
+    secretAccessKey: SECRET_ACCESS_KEY
+
+# Adapter for writing converted data. Known types: console, file, s3file, mongodb
+adapterOut:
+  - type: mongodlab
+    host: MONGO_HOST
+    port: 27017
+    username: admin
+    password: MONGO_PASSWORD
+    database: dlabdb
+#    bufferSize: 10000
+    upsert: true
+    serviceBaseName: SERVICE_BASE_NAME
+
+# Filter for source and converted data.
+filter:
+  - type: aws
+    currencyCode: USD
+    columnDlabTag: CONF_BILLING_TAG
+    serviceBaseName: SERVICE_BASE_NAME
+
+
+# Parser of source data to common format.
+parser:
+  - type: csv
+    headerLineNo: 1
+    skipLines: 1
+    columnMapping: >-
+      dlab_id=DLAB_ID;usage_date=USAGE_DATE;product=PRODUCT;
+      usage_type=USAGE_TYPE;usage=USAGE;cost=COST;
+      resource_id=RESOURCE_ID;tags=TAGS
+    aggregate: day
+
+
+# Logging configuration.
+logging:
+ # Default logging level
+  level: INFO
+  # Logging levels for appenders.
+  loggers:
+    com.epam: DEBUG
+    org.apache.http: WARN
+    org.mongodb.driver: WARN
+    org.hibernate: WARN
+ # Logging appenders
+  appenders:
+    #- type: console
+    - type: file
+      currentLogFilename: /var/opt/dlab/log/ssn/billing.log
+      archive: true
+      archivedLogFilenamePattern: /var/opt/dlab/log/ssn/billing-%d{yyyy-MM-dd}.log.gz
+      archivedFileCount: 10
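The `columnMapping` value is a semicolon-separated list of `internal=source` pairs; YAML's `>-` folding joins its lines with spaces, so a parser should trim them. A sketch of the parsing this format implies:

```python
def parse_column_mapping(mapping):
    """'dlab_id=DLAB_ID;usage_date=USAGE_DATE' -> {'dlab_id': 'DLAB_ID', ...}"""
    pairs = (item.split("=", 1) for item in mapping.split(";") if item.strip())
    return {key.strip(): value.strip() for key, value in pairs}

mapping = ("dlab_id=DLAB_ID;usage_date=USAGE_DATE;product=PRODUCT;"
           "usage_type=USAGE_TYPE;usage=USAGE;cost=COST;"
           "resource_id=RESOURCE_ID;tags=TAGS")
assert parse_column_mapping(mapping)["cost"] == "COST"
```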
diff --git a/infrastructure-provisioning/terraform/bin/deploy/billing_azure.yml b/infrastructure-provisioning/terraform/bin/deploy/billing_azure.yml
new file mode 100644
index 0000000..6953d49
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/billing_azure.yml
@@ -0,0 +1,80 @@
+# *****************************************************************************
+#
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+#
+# ******************************************************************************
+
+spring:
+  main:
+    allow-bean-definition-overriding: true
+  data:
+    mongodb:
+      username: admin
+      password: MONGO_PASSWORD
+      database: dlabdb
+      port: MONGO_PORT
+      host: MONGO_HOST
+
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
+
+server.ssl.key-store-type: JKS
+server.ssl.key-store: /home/OS_USER/keys/endpoint.keystore.jks
+server.ssl.key-store-password: KEY_STORE_PASSWORD
+server.ssl.key-alias: endpoint
+
+logging:
+  file: /var/opt/dlab/log/ssn/billing.log
+  level:
+    com:
+      epam: trace
+
+keycloak:
+  bearer-only: true
+  realm: dlab
+  resource: KEYCLOAK_CLIENT_ID
+  credentials.secret: KEYCLOAK_CLIENT_SECRET
+  ssl-required: none
+  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
+
+dlab:
+  sbn: SERVICE_BASE_NAME
+  billingEnabled: true
+  clientId: CLIENT_ID
+  clientSecret: CLIENT_SECRET
+  tenantId: TENANT_ID
+  subscriptionId: SUBSCRIPTION_ID
+  authenticationFile: AUTHENTICATION_FILE
+  # Billing configuration for RateCard API. For more details please see https://msdn.microsoft.com/en-us/library/mt219004.aspx
+  offerNumber: OFFER_NUMBER
+  currency: CURRENCY
+  locale: LOCALE
+  regionInfo: REGION_INFO
+  initialDelay: 10
+  period: 60
+  aggregationOutputMongoDataSource:
+    host: MONGO_HOST
+    port: MONGO_PORT
+    username: admin
+    password: MONGO_PASSWORD
+    database: dlabdb
+  ssnStorageAccountTagName: <AZURE_SSN_STORAGE_ACCOUNT_TAG>
+  sharedStorageAccountTagName: <AZURE_SHARED_STORAGE_ACCOUNT_TAG>
+  datalakeTagName: <AZURE_DATALAKE_TAG>
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/bin/deploy/billing_gcp.yml b/infrastructure-provisioning/terraform/bin/deploy/billing_gcp.yml
new file mode 100644
index 0000000..af793ba
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/billing_gcp.yml
@@ -0,0 +1,59 @@
+# *****************************************************************************
+#
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+#
+# ******************************************************************************
+
+spring:
+  main:
+    allow-bean-definition-overriding: true
+  data:
+    mongodb:
+      username: admin
+      password: MONGO_PASSWORD
+      database: dlabdb
+      port: 27017
+      host: MONGO_HOST
+dlab:
+  sbn: SERVICE_BASE_NAME
+  bigQueryDataset: DATASET_NAME
+  cron: 0 0 * * * *
+
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
+
+server.ssl.key-store-type: JKS
+server.ssl.key-store: /home/OS_USER/keys/endpoint.keystore.jks
+server.ssl.key-store-password: KEY_STORE_PASSWORD
+server.ssl.key-alias: endpoint
+
+logging:
+  file: /var/opt/dlab/log/ssn/billing.log
+  level:
+    com:
+      epam: trace
+
+keycloak:
+  bearer-only: true
+  realm: dlab
+  resource: KEYCLOAK_CLIENT_ID
+  credentials.secret: CLIENT_SECRET
+  ssl-required: none
+  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
new file mode 100644
index 0000000..47ee469
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
@@ -0,0 +1,1179 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import argparse
+import logging
+import random
+import string
+import sys
+import time
+import traceback
+from fabric import Connection
+from patchwork.files import exists
+
+conn = None
+args = None
+java_home = None
+
+
+def create_user():
+    initial_user = 'ubuntu'
+    sudo_group = 'sudo'
+    with Connection(host=args.hostname, user=initial_user,
+                    connect_kwargs={'key_filename': args.pkey}) as conn:
+        try:
+            if not exists(conn,
+                          '/home/{}/.ssh_user_ensured'.format(initial_user)):
+                conn.sudo('useradd -m -G {1} -s /bin/bash {0}'
+                          .format(args.os_user, sudo_group))
+                conn.sudo(
+                    'bash -c \'echo "{} ALL = NOPASSWD:ALL" >> /etc/sudoers\''.format(args.os_user))
+                conn.sudo('mkdir /home/{}/.ssh'.format(args.os_user))
+                conn.sudo('chown -R {0}:{0} /home/{1}/.ssh/'
+                          .format(initial_user, args.os_user))
+                conn.sudo('cat /home/{0}/.ssh/authorized_keys > '
+                          '/home/{1}/.ssh/authorized_keys'
+                          .format(initial_user, args.os_user))
+                conn.sudo(
+                    'chown -R {0}:{0} /home/{0}/.ssh/'.format(args.os_user))
+                conn.sudo('chmod 700 /home/{0}/.ssh'.format(args.os_user))
+                conn.sudo('chmod 600 /home/{0}/.ssh/authorized_keys'
+                          .format(args.os_user))
+                conn.sudo(
+                    'touch /home/{}/.ssh_user_ensured'.format(initial_user))
+        except Exception as err:
+            logging.error('Failed to create new os_user: %s', str(err))
+            sys.exit(1)
+
+
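`create_user()` introduces the marker-file idempotence pattern used by nearly every function below: do the work once, then `touch` a flag so reruns become no-ops. A sketch of that pattern factored into a helper (a possible refactor, not part of this patch):

```python
from patchwork.files import exists  # same helper this script already uses

def run_once(conn, os_user, marker, commands):
    """Run `commands` via sudo unless the marker flag already exists."""
    flag = '/home/{}/.ensure_dir/{}'.format(os_user, marker)
    if exists(conn, flag):
        return False
    for command in commands:
        conn.sudo(command)
    conn.sudo('touch {}'.format(flag))
    return True

# run_once(conn, args.os_user, 'superv_ensured',
#          ['apt-get -y install supervisor', 'update-rc.d supervisor defaults'])
```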
+def copy_keys():
+    try:
+        conn.put(args.pkey, '/home/{0}/keys/'.format(args.os_user))
+        conn.sudo('chown -R {0}:{0} /home/{0}/keys'.format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to copy admin key: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_dir_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir'.format(args.os_user)):
+            conn.sudo('mkdir /home/{}/.ensure_dir'.format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to create ~/.ensure_dir/: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_logs_endpoint():
+    log_root_dir = "/var/opt/dlab/log"
+    supervisor_log_file = "/var/log/application/provision-service.log"
+    try:
+        if not exists(conn, '/home/' + args.os_user + '/.ensure_dir/logs_ensured'):
+            if not exists(conn, args.dlab_path):
+                conn.sudo("mkdir -p " + args.dlab_path)
+                conn.sudo("chown -R " + args.os_user + ' ' + args.dlab_path)
+            if not exists(conn, log_root_dir):
+                conn.sudo('mkdir -p ' + log_root_dir + '/provisioning')
+                conn.sudo('touch ' + log_root_dir + '/provisioning/provisioning.log')
+            if not exists(conn, supervisor_log_file):
+                conn.sudo("mkdir -p /var/log/application")
+                conn.sudo("touch " + supervisor_log_file)
+            conn.sudo("chown -R {0} {1}".format(args.os_user, log_root_dir))
+            conn.sudo('touch /home/' + args.os_user + '/.ensure_dir/logs_ensured')
+    except Exception as err:
+        logging.error('Failed to configure logs and dlab directory: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_jre_jdk_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir/jre_jdk_ensured'.format(args.os_user)):
+            conn.sudo('apt-get install -y openjdk-8-jre-headless')
+            conn.sudo('apt-get install -y openjdk-8-jdk-headless')
+            conn.sudo('touch /home/{}/.ensure_dir/jre_jdk_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to install Java JDK: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_step_certs():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir/step_ensured'.format(args.os_user)):
+            conn.sudo('wget https://github.com/smallstep/cli/releases/download/v0.13.3/step-cli_0.13.3_amd64.deb '
+                      '-O /tmp/step-cli_0.13.3_amd64.deb')
+            conn.sudo('dpkg -i /tmp/step-cli_0.13.3_amd64.deb')
+            conn.sudo('''bash -c 'echo "{0}" | base64 --decode > /etc/ssl/certs/root_ca.crt' '''.format(args.step_root_ca))
+            fingerprint = conn.sudo('step certificate fingerprint /etc/ssl/certs/root_ca.crt').stdout.replace('\n', '')
+            conn.sudo('step ca bootstrap --fingerprint {0} --ca-url "{1}"'.format(fingerprint,
+                                                                                  args.step_ca_url))
+            conn.sudo('echo "{0}" > /home/{1}/keys/provisioner_password'.format(args.step_kid_password, args.os_user))
+            if args.cloud_provider == 'aws':
+                local_ip_address = conn.sudo('curl -s '
+                                             'http://169.254.169.254/latest/meta-data/local-ipv4').stdout.replace('\n', '')
+                try:
+                    public_ip_address = conn.sudo('curl -s http://169.254.169.254/latest/meta-data/'
+                                                  'public-ipv4').stdout.replace('\n', '')
+                except Exception:
+                    public_ip_address = None
+            elif args.cloud_provider == 'gcp':
+                # network-interfaces/0/ip is the internal address;
+                # access-configs/0/external-ip is the public one.
+                local_ip_address = conn.sudo('curl -H "Metadata-Flavor: Google" '
+                                             'http://metadata/computeMetadata/v1/instance/network-interfaces/0/'
+                                             'ip').stdout.replace('\n', '')
+                try:
+                    public_ip_address = conn.sudo('curl -H "Metadata-Flavor: Google" '
+                                                  'http://metadata/computeMetadata/v1/instance/network-interfaces/0/'
+                                                  'access-configs/0/external-ip').stdout.replace('\n', '')
+                except Exception:
+                    public_ip_address = None
+            elif args.cloud_provider == 'azure':
+                local_ip_address = conn.sudo('curl -s -H Metadata:true "http://169.254.169.254/metadata/'
+                                             'instance?api-version=2017-08-01&format=json" | jq -r ".network.'
+                                             'interface[].ipv4.ipAddress[].privateIpAddress"').stdout.replace('\n', '')
+                try:
+                    public_ip_address = conn.sudo('curl -s -H Metadata:true "http://169.254.169.254/metadata/'
+                                                  'instance?api-version=2017-08-01&format=json" | jq -r ".network.'
+                                                  'interface[].ipv4.ipAddress[].publicIpAddress"').stdout.replace('\n',
+                                                                                                                  '')
+                except Exception:
+                    public_ip_address = None
+            else:
+                local_ip_address = None
+                public_ip_address = None
+            sans = "--san localhost --san {0} --san 127.0.0.1 ".format(local_ip_address)
+            cn = local_ip_address
+            if public_ip_address:
+                sans += "--san {0}".format(public_ip_address)
+                cn = public_ip_address
+            conn.sudo('step ca token {3} --kid {0} --ca-url "{1}" --root /etc/ssl/certs/root_ca.crt '
+                      '--password-file /home/{2}/keys/provisioner_password {4} --output-file /tmp/step_token'.format(
+                               args.step_kid, args.step_ca_url, args.os_user, cn, sans))
+            token = conn.sudo('cat /tmp/step_token').stdout.replace('\n', '')
+            conn.sudo('step ca certificate "{0}" /etc/ssl/certs/dlab.crt /etc/ssl/certs/dlab.key '
+                      '--token "{1}" --kty=RSA --size 2048 --provisioner {2} '.format(cn, token, args.step_kid))
+            conn.put('./renew_certificates.sh', '/tmp/renew_certificates.sh')
+            conn.sudo('mv /tmp/renew_certificates.sh /usr/local/bin/')
+            conn.sudo('chmod +x /usr/local/bin/renew_certificates.sh')
+            conn.sudo('sed -i "s/OS_USER/{0}/g" /usr/local/bin/renew_certificates.sh'.format(args.os_user))
+            conn.sudo('sed -i "s|JAVA_HOME|{0}|g" /usr/local/bin/renew_certificates.sh'.format(java_home))
+            conn.sudo('sed -i "s|RESOURCE_TYPE|endpoint|g" /usr/local/bin/renew_certificates.sh')
+            conn.sudo('sed -i "s|CONF_FILE|provisioning|g" /usr/local/bin/renew_certificates.sh')
+            conn.sudo('touch /var/log/renew_certificates.log')
+            conn.put('./manage_step_certs.sh', '/tmp/manage_step_certs.sh')
+            conn.sudo('mv /tmp/manage_step_certs.sh /usr/local/bin/manage_step_certs.sh')
+            conn.sudo('chmod +x /usr/local/bin/manage_step_certs.sh')
+            conn.sudo('sed -i "s|STEP_ROOT_CERT_PATH|/etc/ssl/certs/root_ca.crt|g" '
+                      '/usr/local/bin/manage_step_certs.sh')
+            conn.sudo('sed -i "s|STEP_CERT_PATH|/etc/ssl/certs/dlab.crt|g" /usr/local/bin/manage_step_certs.sh')
+            conn.sudo('sed -i "s|STEP_KEY_PATH|/etc/ssl/certs/dlab.key|g" /usr/local/bin/manage_step_certs.sh')
+            conn.sudo('sed -i "s|STEP_CA_URL|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(args.step_ca_url))
+            conn.sudo('sed -i "s|RESOURCE_TYPE|endpoint|g" /usr/local/bin/manage_step_certs.sh')
+            conn.sudo('sed -i "s|SANS|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(sans))
+            conn.sudo('sed -i "s|CN|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(cn))
+            conn.sudo('sed -i "s|KID|{0}|g" /usr/local/bin/manage_step_certs.sh'.format(args.step_kid))
+            conn.sudo('sed -i "s|STEP_PROVISIONER_PASSWORD_PATH|/home/{0}/keys/provisioner_password|g" '
+                      '/usr/local/bin/manage_step_certs.sh'.format(args.os_user))
+            conn.sudo('bash -c \'echo "0 * * * * root /usr/local/bin/manage_step_certs.sh >> '
+                      '/var/log/renew_certificates.log 2>&1" >> /etc/crontab \'')
+            conn.put('./step-cert-manager.service', '/tmp/step-cert-manager.service')
+            conn.sudo('mv /tmp/step-cert-manager.service /etc/systemd/system/step-cert-manager.service')
+            conn.sudo('systemctl daemon-reload')
+            conn.sudo('systemctl enable step-cert-manager.service')
+            conn.sudo('touch /home/{}/.ensure_dir/step_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to configure step certificates: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
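`ensure_step_certs()` repeats nearly identical metadata lookups per cloud. A possible consolidation, using the same metadata URLs as above (the Azure branch is omitted for brevity, since its `jq` parsing makes it less uniform):

```python
METADATA_COMMANDS = {
    'aws': ('curl -s http://169.254.169.254/latest/meta-data/local-ipv4',
            'curl -s http://169.254.169.254/latest/meta-data/public-ipv4'),
    'gcp': ('curl -H "Metadata-Flavor: Google" http://metadata/computeMetadata'
            '/v1/instance/network-interfaces/0/ip',
            'curl -H "Metadata-Flavor: Google" http://metadata/computeMetadata'
            '/v1/instance/network-interfaces/0/access-configs/0/external-ip'),
}

def instance_ips(conn, cloud_provider):
    """Return (local_ip, public_ip); public_ip is None when unavailable."""
    local_cmd, public_cmd = METADATA_COMMANDS[cloud_provider]
    local_ip = conn.sudo(local_cmd).stdout.strip()
    try:
        public_ip = conn.sudo(public_cmd).stdout.strip() or None
    except Exception:
        public_ip = None
    return local_ip, public_ip
```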
+def ensure_supervisor_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir/superv_ensured'.format(args.os_user)):
+            conn.sudo('apt-get -y install supervisor')
+            conn.sudo('update-rc.d supervisor defaults')
+            conn.sudo('update-rc.d supervisor enable')
+            conn.sudo('touch /home/{}/.ensure_dir/superv_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to install Supervisor: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_docker_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir/docker_ensured'.format(args.os_user)):
+            conn.sudo("bash -c "
+                      "'curl -fsSL https://download.docker.com/linux/ubuntu/gpg"
+                      " | apt-key add -'")
+            conn.sudo('add-apt-repository "deb [arch=amd64] '
+                      'https://download.docker.com/linux/ubuntu '
+                      '$(lsb_release -cs) stable"')
+            conn.sudo('apt-get update')
+            conn.sudo('apt-cache policy docker-ce')
+            conn.sudo('apt-get install -y docker-ce={}'
+                      .format(args.docker_version))
+            if not exists(conn, '{}/tmp'.format(args.dlab_path)):
+                conn.run('mkdir -p {}/tmp'.format(args.dlab_path))
+            conn.put('./daemon.json',
+                     '{}/tmp/daemon.json'.format(args.dlab_path))
+            conn.sudo('sed -i "s|REPOSITORY|{}:{}|g" {}/tmp/daemon.json'
+                      .format(args.repository_address,
+                              args.repository_port,
+                              args.dlab_path))
+            if args.cloud_provider == "aws":
+                dns_ip_resolve = (conn.run("systemd-resolve --status "
+                                           "| grep -A 5 'Current Scopes: DNS' "
+                                           "| grep 'DNS Servers:' "
+                                           "| awk '{print $3}'")
+                                  .stdout.rstrip("\n\r"))
+                conn.sudo("sed -i 's|DNS_IP_RESOLVE|\"dns\": [\"{0}\"],|g' {1}/tmp/daemon.json"
+                          .format(dns_ip_resolve, args.dlab_path))
+            elif args.cloud_provider == "gcp" or args.cloud_provider == "azure":
+                dns_ip_resolve = ""
+                conn.sudo('sed -i "s|DNS_IP_RESOLVE||g" {1}/tmp/daemon.json'
+                          .format(dns_ip_resolve, args.dlab_path))
+            conn.sudo('mv {}/tmp/daemon.json /etc/docker'
+                      .format(args.dlab_path))
+            conn.sudo('usermod -a -G docker ' + args.os_user)
+            conn.sudo('update-rc.d docker defaults')
+            conn.sudo('update-rc.d docker enable')
+            conn.sudo('service docker restart')
+            conn.sudo('touch /home/{}/.ensure_dir/docker_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to install Docker: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
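`ensure_docker_endpoint()` patches a `daemon.json` template with sed. The template itself is not part of this diff; assuming the REPOSITORY placeholder stands for an insecure-registry entry and DNS_IP_RESOLVE for an optional `dns` list, the same file could be rendered natively:

```python
import json

def render_daemon_json(registry_host, registry_port, dns_servers=None):
    """Build daemon.json in Python; the key names here are assumptions,
    since the daemon.json template is not included in this diff."""
    config = {"insecure-registries": ["{}:{}".format(registry_host, registry_port)]}
    if dns_servers:  # only the AWS branch sets explicit DNS servers
        config["dns"] = list(dns_servers)
    return json.dumps(config, indent=2)
```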
+def ensure_mongo_endpoint():
+    try:
+        print('[INSTALLING MONGO DATABASE]')
+        if not exists(conn, '/home/{}/.ensure_dir/mongo_ensured'.format(args.os_user)):
+            conn.sudo("bash -c 'wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | sudo apt-key add -'")
+            conn.sudo("bash -c 'echo \"deb [ arch=amd64,arm64 ] "
+                      "https://repo.mongodb.org/apt/ubuntu $(lsb_release -cs)/mongodb-org/4.2 multiverse\" | sudo "
+                      "tee /etc/apt/sources.list.d/mongodb-org-4.2.list'")
+            conn.sudo('apt-get update')
+            conn.sudo('apt-get -y --allow-unauthenticated install mongodb-org')
+            conn.sudo('systemctl enable mongod.service')
+            conn.sudo('apt-get -y install python-pip')
+            conn.sudo('pip install -U pymongo pyyaml --no-cache-dir ')
+            conn.sudo('touch /home/{}/.ensure_dir/mongo_ensured'
+                      .format(args.os_user))
+        print('[CONFIGURING MONGO DATABASE]')
+        if not exists(conn, '/lib/systemd/system/mongod.service'):
+            conn.put('./mongo_files/mongod.service_template', '/tmp/mongod.service_template')
+            conn.sudo('sed -i "s/MONGO_USR/mongodb/g" /tmp/mongod.service_template')
+            conn.sudo('cp -i /tmp/mongod.service_template /lib/systemd/system/mongod.service')
+            conn.sudo('systemctl daemon-reload')
+            conn.sudo('systemctl enable mongod.service')
+        if not exists(conn, '/tmp/configure_mongo.py'):
+            conn.put('./mongo_files/configure_mongo.py', '/tmp/configure_mongo.py')
+            conn.sudo('sed -i "s|PASSWORD|{}|g" /tmp/configure_mongo.py'.format(args.mongo_password))
+        if not exists(conn, '/tmp/mongo_roles.json'):
+            conn.put('./mongo_files/gcp/mongo_roles.json', '/tmp/mongo_roles.json')
+        conn.sudo('python /tmp/configure_mongo.py')
+    except Exception as err:
+        logging.error('Failed to install Mongo: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def create_key_dir_endpoint():
+    try:
+        if not exists(conn, '/home/{}/keys'.format(args.os_user)):
+            conn.run('mkdir /home/{}/keys'.format(args.os_user))
+            if args.auth_file_path:
+                conn.put(args.auth_file_path, '/tmp/azure_auth.json')
+                conn.sudo('mv /tmp/azure_auth.json /home/{}/keys/'.format(args.os_user))
+                args.auth_file_path = '/home/{}/keys/azure_auth.json'.format(args.os_user)
+    except Exception as err:
+        logging.error('Failed to create keys directory ~/keys: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def configure_keystore_endpoint(os_user, endpoint_keystore_password):
+    try:
+        conn.sudo('openssl pkcs12 -export -in /etc/ssl/certs/dlab.crt -inkey '
+                  '/etc/ssl/certs/dlab.key -name endpoint -out /home/{0}/keys/endpoint.p12 '
+                  '-password pass:{1}'.format(os_user, endpoint_keystore_password))
+        conn.sudo('keytool -importkeystore -srckeystore /home/{0}/keys/endpoint.p12 -srcstoretype PKCS12 '
+                  '-alias endpoint -destkeystore /home/{0}/keys/endpoint.keystore.jks -deststorepass "{1}" '
+                  '-srcstorepass "{1}"'.format(os_user, endpoint_keystore_password))
+        conn.sudo('keytool -keystore /home/{0}/keys/endpoint.keystore.jks -alias step-ca -import -file '
+                  '/etc/ssl/certs/root_ca.crt  -deststorepass "{1}" -noprompt'.format(
+                   os_user, endpoint_keystore_password))
+        conn.sudo('keytool -importcert -trustcacerts -alias endpoint -file /etc/ssl/certs/dlab.crt -noprompt '
+                  '-storepass changeit -keystore {0}/lib/security/cacerts'.format(java_home))
+        conn.sudo('keytool -importcert -trustcacerts -file /etc/ssl/certs/root_ca.crt -noprompt '
+                  '-storepass changeit -keystore {0}/lib/security/cacerts'.format(java_home))
+        conn.sudo('touch /home/{0}/.ensure_dir/cert_imported'.format(os_user))
+        print("Certificates are imported.")
+    except Exception as err:
+        logging.error('Failed to configure Keystore certificates: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def configure_supervisor_endpoint(endpoint_keystore_password):
+    try:
+        if not exists(conn,
+                      '/home/{}/.ensure_dir/configure_supervisor_ensured'.format(args.os_user)):
+            supervisor_conf = '/etc/supervisor/conf.d/supervisor_svc.conf'
+            if not exists(conn, '{}/tmp'.format(args.dlab_path)):
+                conn.run('mkdir -p {}/tmp'.format(args.dlab_path))
+            conn.put('./supervisor_svc.conf', '{}/tmp/supervisor_svc.conf'.format(args.dlab_path))
+            dlab_conf_dir = '{}/conf/'.format(args.dlab_path)
+            if not exists(conn, dlab_conf_dir):
+                conn.run('mkdir -p {}'.format(dlab_conf_dir))
+            web_path = '{}/webapp'.format(args.dlab_path)
+            if not exists(conn, web_path):
+                conn.run('mkdir -p {}'.format(web_path))
+            if args.cloud_provider == 'aws':
+                # strip the trailing newline/slash from the metadata response
+                interface = conn.sudo('curl -s http://169.254.169.254/latest/meta-data/network/interfaces/'
+                                      'macs/').stdout.strip().split('/')[0]
+                args.vpc_id = conn.sudo('curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/{}/'
+                                        'vpc-id'.format(interface)).stdout.strip()
+                args.subnet_id = conn.sudo('curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/{}/'
+                                           'subnet-id'.format(interface)).stdout.strip()
+                args.vpc2_id = args.vpc_id
+                args.subnet2_id = args.subnet_id
+                conn.sudo('sed -i "s|CONF_PARAMETER|--spring.config.location={0}billing_app.yml --conf |g" {1}/tmp/supervisor_svc.conf'
+                          .format(dlab_conf_dir, args.dlab_path))
+            elif args.cloud_provider == 'gcp' or args.cloud_provider == 'azure':
+                conn.sudo('sed -i "s|CONF_PARAMETER|--spring.config.location=|g" {}/tmp/supervisor_svc.conf'
+                          .format(args.dlab_path))
+            conn.sudo('sed -i "s|OS_USR|{}|g" {}/tmp/supervisor_svc.conf'
+                      .format(args.os_user, args.dlab_path))
+            conn.sudo('sed -i "s|WEB_CONF|{}|g" {}/tmp/supervisor_svc.conf'
+                      .format(dlab_conf_dir, args.dlab_path))
+            conn.sudo('sed -i \'s=WEB_APP_DIR={}=\' {}/tmp/supervisor_svc.conf'
+                      .format(web_path, args.dlab_path))
+            conn.sudo('cp {}/tmp/supervisor_svc.conf {}'
+                      .format(args.dlab_path, supervisor_conf))
+            conn.put('./provisioning.yml', '{}provisioning.yml'
+                     .format(dlab_conf_dir))
+            if args.resource_group_name == '':
+                args.resource_group_name = '{}-{}-resource-group'.format(args.service_base_name, args.endpoint_id)
+            if args.cloud_provider == 'azure':
+                args.region = args.region.lower().replace(' ', '')
+            cloud_properties = [
+                {
+                    'key': "OS_USER",
+                    'value': args.os_user
+                },
+                {
+                    'key': "KEYNAME",
+                    'value': args.key_name
+                },
+                {
+                    'key': "KEYSTORE_PASSWORD",
+                    'value': endpoint_keystore_password
+                },
+                {
+                    'key': "JRE_HOME",
+                    'value': java_home
+                },
+                {
+                    'key': "CLOUD_PROVIDER",
+                    'value': args.cloud_provider
+                },
+                {
+                    'key': "MONGO_HOST",
+                    'value': args.mongo_host
+                },
+                {
+                    'key': "MONGO_PORT",
+                    'value': args.mongo_port
+                },
+                {
+                    'key': "SSN_UI_HOST",
+                    'value': args.ssn_ui_host
+                },
+                {
+                    'key': "KEYCLOAK_CLIENT_ID",
+                    'value': args.keycloak_client_id
+                },
+                {
+                    'key': "CLIENT_SECRET",
+                    'value': args.keycloak_client_secret
+                },
+                {
+                    'key': "CONF_OS",
+                    'value': args.env_os
+                },
+                {
+                    'key': "SERVICE_BASE_NAME",
+                    'value': args.service_base_name
+                },
+                {
+                    'key': "EDGE_INSTANCE_SIZE",
+                    'value': ""  # args.edge_instance_size
+                },
+                {
+                    'key': "SUBNET_ID",
+                    'value': args.subnet_id
+                },
+                {
+                    'key': "REGION",
+                    'value': args.region
+                },
+                {
+                    'key': "ZONE",
+                    'value': args.zone
+                },
+                {
+                    'key': "TAG_RESOURCE_ID",
+                    'value': args.tag_resource_id
+                },
+                {
+                    'key': "SG_IDS",
+                    'value': args.ssn_k8s_sg_id
+                },
+                {
+                    'key': "SSN_INSTANCE_SIZE",
+                    'value': args.ssn_instance_size
+                },
+                {
+                    'key': "VPC2_ID",
+                    'value': args.vpc2_id
+                },
+                {
+                    'key': "SUBNET2_ID",
+                    'value': args.subnet2_id
+                },
+                {
+                    'key': "CONF_KEY_DIR",
+                    'value': args.conf_key_dir
+                },
+                {
+                    'key': "VPC_ID",
+                    'value': args.vpc_id
+                },
+                {
+                    'key': "PEERING_ID",
+                    'value': args.peering_id
+                },
+                {
+                    'key': "AZURE_RESOURCE_GROUP_NAME",
+                    'value': args.resource_group_name
+                },
+                {
+                    'key': "AZURE_SSN_STORAGE_ACCOUNT_TAG",
+                    'value': args.azure_ssn_storage_account_tag
+                },
+                {
+                    'key': "AZURE_SHARED_STORAGE_ACCOUNT_TAG",
+                    'value': args.azure_shared_storage_account_tag
+                },
+                {
+                    'key': "AZURE_DATALAKE_TAG",
+                    'value': args.azure_datalake_tag
+                },
+                {
+                    'key': "AZURE_CLIENT_ID",
+                    'value': args.azure_client_id
+                },
+                {
+                    'key': "GCP_PROJECT_ID",
+                    'value': args.gcp_project_id
+                },
+                {
+                    'key': "LDAP_HOST",
+                    'value': args.ldap_host
+                },
+                {
+                    'key': "LDAP_DN",
+                    'value': args.ldap_dn
+                },
+                {
+                    'key': "LDAP_OU",
+                    'value': args.ldap_users_group
+                },
+                {
+                    'key': "LDAP_USER_NAME",
+                    'value': args.ldap_user
+                },
+                {
+                    'key': "LDAP_USER_PASSWORD",
+                    'value': args.ldap_bind_creds
+                },
+                {
+                    'key': "STEP_CERTS_ENABLED",
+                    'value': "true"
+                },
+                {
+                    'key': "STEP_ROOT_CA",
+                    'value': args.step_root_ca
+                },
+                {
+                    'key': "STEP_KID_ID",
+                    'value': args.step_kid
+                },
+                {
+                    'key': "STEP_KID_PASSWORD",
+                    'value': args.step_kid_password
+                },
+                {
+                    'key': "STEP_CA_URL",
+                    'value': args.step_ca_url
+                },
+                {
+                    'key': "SHARED_IMAGE_ENABLED",
+                    'value': args.shared_image_enabled
+                },
+                {
+                    'key': "CONF_IMAGE_ENABLED",
+                    'value': args.image_enabled
+                },
+                {
+                    'key': "KEYCLOAK_AUTH_SERVER_URL",
+                    'value': args.keycloak_auth_server_url
+                },
+                {
+                    'key': "KEYCLOAK_REALM_NAME",
+                    'value': args.keycloak_realm_name
+                },
+                {
+                    'key': "KEYCLOAK_USER_NAME",
+                    'value': args.keycloak_user_name
+                },
+                {
+                    'key': "KEYCLOAK_PASSWORD",
+                    'value': args.keycloak_user_password
+                },
+                {
+                    'key': "AZURE_AUTH_FILE_PATH",
+                    'value': args.auth_file_path
+                }
+            ]
+            for param in cloud_properties:
+                conn.sudo('sed -i "s|{0}|{1}|g" {2}provisioning.yml'
+                          .format(param['key'], param['value'], dlab_conf_dir))
+
+            conn.sudo('touch /home/{}/.ensure_dir/configure_supervisor_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to configure Supervisor: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
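`configure_supervisor_endpoint()` applies one remote sed call per entry in `cloud_properties`. An equivalent single-pass approach would render `provisioning.yml` locally before `conn.put` (a sketch, not part of this patch):

```python
def render_template(src_path, dst_path, replacements):
    """Replace every placeholder key with its value in one pass."""
    with open(src_path) as f:
        text = f.read()
    for key, value in replacements.items():
        text = text.replace(key, str(value))
    with open(dst_path, 'w') as f:
        f.write(text)

# replacements = {item['key']: item['value'] for item in cloud_properties}
# render_template('./provisioning.yml', './provisioning.rendered.yml', replacements)
```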
+def ensure_jar_endpoint():
+    try:
+        ensure_file = ('/home/{}/.ensure_dir/backend_jar_ensured'
+                       .format(args.os_user))
+        if not exists(conn, ensure_file):
+            web_path = '{}/webapp'.format(args.dlab_path)
+            if not exists(conn, web_path):
+                conn.run('mkdir -p {}'.format(web_path))
+            conn.run('wget -P {}  --user={} --password={} '
+                     'https://{}/repository/packages/provisioning-service-'
+                     '2.2.jar --no-check-certificate'
+                     .format(web_path, args.repository_user,
+                             args.repository_pass, args.repository_address))
+            conn.run('mv {0}/provisioning-service-2.2.jar {0}/provisioning-service.jar'
+                     .format(web_path))
+            conn.run('wget -P {}  --user={} --password={} '
+                     'https://{}/repository/packages/billing-{}-'
+                     '2.2.jar --no-check-certificate'
+                     .format(web_path, args.repository_user,
+                             args.repository_pass, args.repository_address, args.cloud_provider))
+            conn.run('mv {0}/billing-{1}-2.2.jar {0}/billing.jar'
+                     .format(web_path, args.cloud_provider))
+            conn.sudo('touch {}'.format(ensure_file))
+    except Exception as err:
+        logging.error('Failed to download provisioning and billing jars: {}'.format(str(err)))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def start_supervisor_endpoint():
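+    # Restart Supervisor so it picks up the freshly rendered service configs.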
+    try:
+        conn.sudo("service supervisor restart")
+    except Exception as err:
+        logging.error('Unable to start Supervisor: {}'.format(str(err)))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def get_sources():
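+    # Clone the DLab sources and optionally check out the requested branch.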
+    try:
+        conn.run("git clone https://github.com/apache/incubator-dlab.git {0}/sources".format(args.dlab_path))
+        if args.branch_name != "":
+            conn.run("cd {0}/sources && git checkout {1} && cd".format(args.dlab_path, args.branch_name))
+    except Exception as err:
+        logging.error('Failed to download sources: {}'.format(str(err)))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def pull_docker_images():
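+    # Pull the per-cloud set of DLab images from the private registry,
+    # retag them locally and drop the registry-prefixed tags.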
+    try:
+        ensure_file = ('/home/{}/.ensure_dir/docker_images_pulled'
+                       .format(args.os_user))
+        if not exists(conn, ensure_file):
+            list_images = {
+                'aws': ['base', 'edge', 'project', 'jupyter', 'rstudio', 'zeppelin', 'tensor', 'tensor-rstudio',
+                        'deeplearning', 'jupyterlab', 'dataengine-service', 'dataengine'],
+                'gcp': ['base', 'edge', 'project', 'jupyter', 'rstudio', 'zeppelin', 'tensor', 'tensor-rstudio',
+                        'deeplearning', 'superset', 'jupyterlab', 'dataengine-service', 'dataengine'],
+                'azure': ['base', 'edge', 'project', 'jupyter', 'rstudio', 'zeppelin', 'tensor', 'deeplearning',
+                          'dataengine']
+            }
+            conn.sudo('docker login -u {} -p {} {}:{}'
+                      .format(args.repository_user,
+                              args.repository_pass,
+                              args.repository_address,
+                              args.repository_port))
+            for image in list_images[args.cloud_provider]:
+                conn.sudo('docker pull {0}:{1}/docker.dlab-{3}-{2}'
+                          .format(args.repository_address, args.repository_port, args.cloud_provider, image))
+                conn.sudo('docker tag {0}:{1}/docker.dlab-{3}-{2} docker.dlab-{3}'
+                          .format(args.repository_address, args.repository_port, args.cloud_provider, image))
+                conn.sudo('docker rmi {0}:{1}/docker.dlab-{3}-{2}'
+                          .format(args.repository_address, args.repository_port, args.cloud_provider, image))
+            conn.sudo('chown -R {0}:docker /home/{0}/.docker/'
+                      .format(args.os_user))
+            conn.sudo('touch {}'.format(ensure_file))
+    except Exception as err:
+        logging.error('Failed to pull Docker images: {}'.format(str(err)))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def id_generator(size=10, chars=string.digits + string.ascii_letters):
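+    # Generate a random alphanumeric string; used for one-off service passwords.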
+    return ''.join(random.choice(chars) for _ in range(size))
+
+
+def configure_guacamole():
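+    # Run guacd, MySQL and the Guacamole web app as Docker containers,
+    # load the Guacamole schema into MySQL and register an @reboot cron
+    # job that brings the containers back up.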
+    try:
+        mysql_pass = id_generator()
+        conn.sudo('docker run --name guacd --restart unless-stopped -d -p 4822:4822 guacamole/guacd')
+        conn.sudo('docker run --rm guacamole/guacamole /opt/guacamole/bin/initdb.sh --mysql > initdb.sql')
+        conn.sudo('mkdir /tmp/scripts')
+        conn.sudo('cp initdb.sql /tmp/scripts')
+        conn.sudo('mkdir -p /opt/mysql')
+        conn.sudo('docker run --name guac-mysql --restart unless-stopped -v /tmp/scripts:/tmp/scripts '
+                  ' -v /opt/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD={} -d mysql:latest'.format(mysql_pass))
+        time.sleep(180)
+        conn.sudo('touch /opt/mysql/dock-query.sql')
+        conn.sudo('chown {0}:{0} /opt/mysql/dock-query.sql'.format(args.os_user))
+        conn.sudo("""echo "CREATE DATABASE guacamole; CREATE USER 'guacamole' IDENTIFIED BY '{}';"""
+                  """ GRANT SELECT,INSERT,UPDATE,DELETE ON guacamole.* TO 'guacamole';" > /opt/mysql/dock-query.sql"""
+                  .format(mysql_pass))
+        conn.sudo('docker exec -i guac-mysql /bin/bash -c "mysql -u root -p{} < /var/lib/mysql/dock-query.sql"'
+                  .format(mysql_pass))
+        conn.sudo('docker exec -i guac-mysql /bin/bash -c "cat /tmp/scripts/initdb.sql | mysql -u root -p{} guacamole"'
+                  .format(mysql_pass))
+        conn.sudo("docker run --name guacamole --restart unless-stopped --link guacd:guacd --link guac-mysql:mysql"
+                  " -e MYSQL_DATABASE='guacamole' -e MYSQL_USER='guacamole' -e MYSQL_PASSWORD='{}'"
+                  " -d -p 8080:8080 guacamole/guacamole".format(mysql_pass))
+        # Create a cron job to start the containers again after a reboot
+        conn.sudo('mkdir -p /opt/dlab/cron')
+        conn.sudo('touch /opt/dlab/cron/mysql.sh')
+        conn.sudo('chmod 755 /opt/dlab/cron/mysql.sh')
+        conn.sudo('chown {0}:{0} /opt/dlab/cron/mysql.sh'.format(args.os_user))
+        conn.sudo('echo "docker start guacd" >> /opt/dlab/cron/mysql.sh')
+        conn.sudo('echo "docker start guac-mysql" >> /opt/dlab/cron/mysql.sh')
+        conn.sudo('echo "docker rm guacamole" >> /opt/dlab/cron/mysql.sh')
+        conn.sudo("""echo "docker run --name guacamole --restart unless-stopped --link guacd:guacd""" 
+                  """ --link guac-mysql:mysql -e MYSQL_DATABASE='guacamole' -e MYSQL_USER='guacamole' """
+                  """-e MYSQL_PASSWORD='{}' -d -p 8080:8080 guacamole/guacamole" >> """
+                  """/opt/dlab/cron/mysql.sh""".format(mysql_pass))
+        conn.sudo('''/bin/bash -c '(crontab -l 2>/dev/null; echo "@reboot sh /opt/dlab/cron/mysql.sh") |''' 
+                  ''' crontab - ' ''')
+    except Exception as err:
+        traceback.print_exc()
+        logging.error('Failed to configure guacamole: {}'.format(str(err)))
+        return False
+
+
+def configure_billing_endpoint(endpoint_keystore_password):
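+    # Upload the per-cloud billing.yml template and substitute its placeholders;
+    # AWS additionally gets a billing_app.yml.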
+    try:
+        if args.billing_enable:
+            conn.put('./billing_{}.yml'.format(args.cloud_provider), '{}/conf/billing.yml'
+                     .format(args.dlab_path))
+            billing_yml_path = "{}/conf/billing.yml".format(args.dlab_path)
+            if args.cloud_provider == 'aws':
+                conn.put('./billing_app_{}.yml'.format(args.cloud_provider), '{}/conf/billing_app.yml'
+                         .format(args.dlab_path))
+                billing_app_yml_path = "{}/conf/billing_app.yml".format(args.dlab_path)
+                billing_app_properties = [
+                    {
+                        'key': "MONGO_HOST",
+                        'value': args.mongo_host
+                    },
+                    {
+                        'key': "MONGO_PASSWORD",
+                        'value': args.mongo_password
+                    },
+                    {
+                        'key': "MONGO_PORT",
+                        'value': args.mongo_port
+                    },
+                    {
+                        'key': "OS_USER",
+                        'value': args.os_user
+                    },
+                    {
+                        'key': "KEY_STORE_PASSWORD",
+                        'value': endpoint_keystore_password
+                    },
+                    {
+                        'key': "KEYCLOAK_CLIENT_ID",
+                        'value': args.keycloak_client_id
+                    },
+                    {
+                        'key': "CLIENT_SECRET",
+                        'value': args.keycloak_client_secret
+                    },
+                    {
+                        'key': "KEYCLOAK_AUTH_SERVER_URL",
+                        'value': args.keycloak_auth_server_url
+                    }
+                ]
+                for param in billing_app_properties:
+                    conn.sudo('sed -i "s|{0}|{1}|g" {2}'
+                              .format(param['key'], param['value'], billing_app_yml_path))
+                if args.aws_job_enabled == 'true':
+                    args.tag_resource_id = 'resourceTags' + ':' + args.tag_resource_id
+                billing_properties = [
+                    {
+                        'key': "MONGO_HOST",
+                        'value': args.mongo_host
+                    },
+                    {
+                        'key': "MONGO_PASSWORD",
+                        'value': args.mongo_password
+                    },
+                    {
+                        'key': "MONGO_PORT",
+                        'value': args.mongo_port
+                    },
+                    {
+                        'key': "BILLING_BUCKET_NAME",
+                        'value': args.billing_bucket
+                    },
+                    {
+                        'key': "REPORT_PATH",
+                        'value': args.report_path
+                    },
+                    {
+                        'key': "AWS_JOB_ENABLED",
+                        'value': args.aws_job_enabled
+                    },
+                    {
+                        'key': "ACCOUNT_ID",
+                        'value': args.billing_aws_account_id
+                    },
+                    {
+                        'key': "ACCESS_KEY_ID",
+                        'value': args.access_key_id
+                    },
+                    {
+                        'key': "SECRET_ACCESS_KEY",
+                        'value': args.secret_access_key
+                    },
+                    {
+                        'key': "CONF_BILLING_TAG",
+                        'value': args.billing_tag
+                    },
+                    {
+                        'key': "SERVICE_BASE_NAME",
+                        'value': args.service_base_name
+                    },
+                    {
+                        'key': "DLAB_ID",
+                        'value': args.billing_dlab_id
+                    },
+                    {
+                        'key': "USAGE_DATE",
+                        'value': args.billing_usage_date
+                    },
+                    {
+                        'key': "PRODUCT",
+                        'value': args.billing_product
+                    },
+                    {
+                        'key': "USAGE_TYPE",
+                        'value': args.billing_usage_type
+                    },
+                    {
+                        'key': "USAGE",
+                        'value': args.billing_usage
+                    },
+                    {
+                        'key': "COST",
+                        'value': args.billing_usage_cost
+                    },
+                    {
+                        'key': "RESOURCE_ID",
+                        'value': args.billing_resource_id
+                    },
+                    {
+                        'key': "TAGS",
+                        'value': args.billing_tags
+                    }
+                ]
+            elif args.cloud_provider == 'gcp':
+                billing_properties = [
+                    {
+                        'key': "SERVICE_BASE_NAME",
+                        'value': args.service_base_name
+                    },
+                    {
+                        'key': "OS_USER",
+                        'value': args.os_user
+                    },
+                    {
+                        'key': "MONGO_PASSWORD",
+                        'value': args.mongo_password
+                    },
+                    {
+                        'key': "MONGO_PORT",
+                        'value': args.mongo_port
+                    },
+                    {
+                        'key': "MONGO_HOST",
+                        'value': args.mongo_host
+                    },
+                    {
+                        'key': "KEY_STORE_PASSWORD",
+                        'value': endpoint_keystore_password
+                    },
+                    {
+                        'key': "DATASET_NAME",
+                        'value': args.billing_dataset_name
+                    },
+                    {
+                        'key': "KEYCLOAK_CLIENT_ID",
+                        'value': args.keycloak_client_id
+                    },
+                    {
+                        'key': "CLIENT_SECRET",
+                        'value': args.keycloak_client_secret
+                    },
+                    {
+                        'key': "KEYCLOAK_AUTH_SERVER_URL",
+                        'value': args.keycloak_auth_server_url
+                    }
+                ]
+            elif args.cloud_provider == 'azure':
+                billing_properties = [
+                    {
+                        'key': "SERVICE_BASE_NAME",
+                        'value': args.service_base_name
+                    },
+                    {
+                        'key': "OS_USER",
+                        'value': args.os_user
+                    },
+                    {
+                        'key': "MONGO_PASSWORD",
+                        'value': args.mongo_password
+                    },
+                    {
+                        'key': "MONGO_PORT",
+                        'value': args.mongo_port
+                    },
+                    {
+                        'key': "MONGO_HOST",
+                        'value': args.mongo_host
+                    },
+                    {
+                        'key': "KEY_STORE_PASSWORD",
+                        'value': endpoint_keystore_password
+                    },
+                    {
+                        'key': "KEYCLOAK_CLIENT_ID",
+                        'value': args.keycloak_client_id
+                    },
+                    {
+                        'key': "KEYCLOAK_CLIENT_SECRET",
+                        'value': args.keycloak_client_secret
+                    },
+                    {
+                        'key': "KEYCLOAK_AUTH_SERVER_URL",
+                        'value': args.keycloak_auth_server_url
+                    },
+                    {
+                        'key': "CLIENT_ID",
+                        'value': args.azure_client_id
+                    },
+                    {
+                        'key': "CLIENT_SECRET",
+                        'value': args.azure_client_secret
+                    },
+                    {
+                        'key': "TENANT_ID",
+                        'value': args.tenant_id
+                    },
+                    {
+                        'key': "SUBSCRIPTION_ID",
+                        'value': args.subscription_id
+                    },
+                    {
+                        'key': "AUTHENTICATION_FILE",
+                        'value': args.auth_file_path
+                    },
+                    {
+                        'key': "OFFER_NUMBER",
+                        'value': args.offer_number
+                    },
+                    {
+                        'key': "CURRENCY",
+                        'value': args.currency
+                    },
+                    {
+                        'key': "LOCALE",
+                        'value': args.locale
+                    },
+                    {
+                        'key': "REGION_INFO",
+                        'value': args.region_info
+                    }
+                ]
+            for param in billing_properties:
+                conn.sudo('sed -i "s|{0}|{1}|g" {2}'
+                          .format(param['key'], param['value'], billing_yml_path))
+    except Exception as err:
+        traceback.print_exc()
+        logging.error('Failed to configure billing: {}'.format(str(err)))
+        return False
+
+
+def init_args():
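+    # Parse CLI arguments into the global args; most values end up substituted
+    # into the config templates below.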
+    global args
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--dlab_path', type=str, default='/opt/dlab')
+    parser.add_argument('--key_name', type=str, default='', help='Name of admin key without .pem extension')
+    parser.add_argument('--endpoint_eip_address', type=str)
+    parser.add_argument('--endpoint_id', type=str, default='')
+    parser.add_argument('--pkey', type=str, default='')
+    parser.add_argument('--hostname', type=str, default='')
+    parser.add_argument('--os_user', type=str, default='dlab-user')
+    parser.add_argument('--cloud_provider', type=str, default='')
+    parser.add_argument('--mongo_host', type=str, default='localhost')
+    parser.add_argument('--mongo_port', type=str, default='27017')
+    parser.add_argument('--ss_host', type=str, default='')
+    parser.add_argument('--ss_port', type=str, default='8443')
+    parser.add_argument('--ssn_ui_host', type=str, default='')
+    parser.add_argument('--mongo_password', type=str, default='')
+    parser.add_argument('--repository_address', type=str, default='')
+    parser.add_argument('--repository_port', type=str, default='')
+    parser.add_argument('--repository_user', type=str, default='')
+    parser.add_argument('--repository_pass', type=str, default='')
+    parser.add_argument('--docker_version', type=str,
+                        default='18.06.3~ce~3-0~ubuntu')
+    parser.add_argument('--ssn_bucket_name', type=str, default='')
+    parser.add_argument('--keycloak_auth_server_url', type=str, default='')
+    parser.add_argument('--keycloak_realm_name', type=str, default='')
+    parser.add_argument('--keycloak_user_name', type=str, default='')
+    parser.add_argument('--keycloak_user_password', type=str, default='')
+    parser.add_argument('--keycloak_client_id', type=str, default='')
+    parser.add_argument('--keycloak_client_secret', type=str, default='')
+    parser.add_argument('--branch_name', type=str, default='master')  # change default
+    parser.add_argument('--env_os', type=str, default='debian')
+    parser.add_argument('--service_base_name', type=str, default='')
+    parser.add_argument('--edge_instence_size', type=str, default='t2.medium')
+    parser.add_argument('--subnet_id', type=str, default='')
+    parser.add_argument('--region', type=str, default='')
+    parser.add_argument('--zone', type=str, default='')
+    parser.add_argument('--tag_resource_id', type=str, default='user:tag')
+    parser.add_argument('--ssn_k8s_sg_id', type=str, default='')
+    parser.add_argument('--ssn_instance_size', type=str, default='t2.large')
+    parser.add_argument('--vpc2_id', type=str, default='')
+    parser.add_argument('--subnet2_id', type=str, default='')
+    parser.add_argument('--conf_key_dir', type=str, default='/root/keys/', help='Must end with a trailing /')
+    parser.add_argument('--vpc_id', type=str, default='')
+    parser.add_argument('--peering_id', type=str, default='')
+    parser.add_argument('--resource_group_name', type=str, default='')
+    parser.add_argument('--azure_ssn_storage_account_tag', type=str, default='')
+    parser.add_argument('--azure_shared_storage_account_tag', type=str, default='')
+    parser.add_argument('--azure_datalake_tag', type=str, default='')
+    parser.add_argument('--azure_datalake_enabled', type=str, default='')
+    parser.add_argument('--azure_client_id', type=str, default='')
+    parser.add_argument('--azure_client_secret', type=str, default='')
+    parser.add_argument('--gcp_project_id', type=str, default='')
+    parser.add_argument('--ldap_host', type=str, default='')
+    parser.add_argument('--ldap_dn', type=str, default='')
+    parser.add_argument('--ldap_users_group', type=str, default='')
+    parser.add_argument('--ldap_user', type=str, default='')
+    parser.add_argument('--ldap_bind_creds', type=str, default='')
+    parser.add_argument('--step_root_ca', type=str, default='')
+    parser.add_argument('--step_kid', type=str, default='')
+    parser.add_argument('--step_kid_password', type=str, default='')
+    parser.add_argument('--step_ca_url', type=str, default='')
+    parser.add_argument('--shared_image_enabled', type=str, default='true')
+    parser.add_argument('--image_enabled', type=str, default='true')
+    parser.add_argument('--auth_file_path', type=str, default='')
+
+    # Billing parameters
+    # Note: a plain type=bool would treat any non-empty string as True
+    parser.add_argument('--billing_enable', type=lambda v: str(v).lower() in ('true', 'yes', '1'), default=False)
+    parser.add_argument('--aws_job_enabled', type=str, default='false')
+    parser.add_argument('--billing_bucket', type=str, default='')
+    parser.add_argument('--report_path', type=str, default='')
+    parser.add_argument('--billing_aws_account_id', type=str, default='')
+    parser.add_argument('--access_key_id', type=str, default='')
+    parser.add_argument('--secret_access_key', type=str, default='')
+    parser.add_argument('--billing_tag', type=str, default='dlab')
+    parser.add_argument('--billing_dlab_id', type=str, default='resource_tags_user_user_tag')
+    parser.add_argument('--billing_usage_date', type=str, default='line_item_usage_start_date')
+    parser.add_argument('--billing_product', type=str, default='product_product_name')
+    parser.add_argument('--billing_usage_type', type=str, default='line_item_usage_type')
+    parser.add_argument('--billing_usage', type=str, default='line_item_usage_amount')
+    parser.add_argument('--billing_usage_cost', type=str, default='line_item_blended_cost')
+    parser.add_argument('--billing_resource_id', type=str, default='line_item_resource_id')
+    parser.add_argument('--billing_tags', type=str, default='line_item_operation,line_item_line_item_description')
+    parser.add_argument('--tenant_id', type=str, default='')
+    parser.add_argument('--subscription_id', type=str, default='')
+    parser.add_argument('--offer_number', type=str, default='')
+    parser.add_argument('--currency', type=str, default='')
+    parser.add_argument('--locale', type=str, default='')
+    parser.add_argument('--region_info', type=str, default='')
+    parser.add_argument('--billing_dataset_name', type=str, default='')
+
+    # TEMPORARY
+    parser.add_argument('--ssn_k8s_nlb_dns_name', type=str, default='')
+    parser.add_argument('--ssn_k8s_alb_dns_name', type=str, default='')
+    # TEMPORARY
+
+    args = parser.parse_known_args()[0]
+
+
+def update_system():
+    conn.sudo('apt-get update')
+    conn.sudo('apt-get install -y jq')
+
+
+def init_dlab_connection(ip=None, user=None,
+                         pkey=None):
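+    # Open a Fabric connection to the endpoint host, defaulting to the
+    # CLI-provided hostname, user and key.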
+    global conn
+    if not ip:
+        ip = args.hostname
+    if not user:
+        user = args.os_user
+    if not pkey:
+        pkey = args.pkey
+    try:
+        conn = Connection(ip, user, connect_kwargs={'key_filename': pkey})
+    except Exception as err:
+        logging.error('Failed to connect as dlab-user: {}'.format(str(err)))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def set_java_home():
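+    # Resolve the JRE path via update-alternatives and cache it in the global java_home.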
+    global java_home
+    command = ('bash -c "update-alternatives --query java | grep \'Value: \' '
+               '| grep -o \'/.*/jre\'" ')
+    java_home = (conn.sudo(command).stdout.rstrip("\n\r"))
+
+
+def close_connection():
+    global conn
+    conn.close()
+
+
+def start_deploy():
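+    # Top-level orchestration: provision, configure and start every endpoint service in order.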
+    global args
+    init_args()
+    print(args)
+    if args.hostname == "":
+        args.hostname = args.endpoint_eip_address
+    endpoint_keystore_password = id_generator()
+
+    print("Start provisioning of Endpoint.")
+    time.sleep(40)
+
+    print(args)
+    logging.info("Creating dlab-user")
+    create_user()
+
+    init_dlab_connection()
+    update_system()
+
+    logging.info("Configuring ensure dir")
+    ensure_dir_endpoint()
+
+    logging.info("Configuring Logs")
+    ensure_logs_endpoint()
+
+    logging.info("Installing Java")
+    ensure_jre_jdk_endpoint()
+
+    set_java_home()
+
+    logging.info("Creating key directory")
+    create_key_dir_endpoint()
+
+    logging.info("Installing Step Certificates")
+    ensure_step_certs()
+
+    logging.info("Installing Supervisor")
+    ensure_supervisor_endpoint()
+
+    logging.info("Installing Docker")
+    ensure_docker_endpoint()
+
+    logging.info("Installing Mongo Database")
+    ensure_mongo_endpoint()
+
+    logging.info("Configuring Supervisor")
+    configure_supervisor_endpoint(endpoint_keystore_password)
+
+    logging.info("Copying admin key")
+    copy_keys()
+
+    logging.info("Configuring certificates")
+    configure_keystore_endpoint(args.os_user, endpoint_keystore_password)
+
+    logging.info("Ensure jar")
+    ensure_jar_endpoint()
+
+    logging.info("Downloading sources")
+    get_sources()
+
+    logging.info("Pulling docker images")
+    pull_docker_images()
+
+    logging.info("Configuring guacamole")
+    configure_guacamole()
+
+    logging.info("Configuring billing")
+    configure_billing_endpoint(endpoint_keystore_password)
+
+    logging.info("Starting supervisor")
+    start_supervisor_endpoint()
+
+    close_connection()
+    print("Done provisioning of Endpoint.")
+
+
+if __name__ == "__main__":
+    start_deploy()
diff --git a/infrastructure-provisioning/terraform/bin/deploy/manage_step_certs.sh b/infrastructure-provisioning/terraform/bin/deploy/manage_step_certs.sh
new file mode 100644
index 0000000..bc194a9
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/manage_step_certs.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+root_crt_path=STEP_ROOT_CERT_PATH
+crt_path=STEP_CERT_PATH
+key_path=STEP_KEY_PATH
+ca_url=STEP_CA_URL
+resource_type=RESOURCE_TYPE
+renew_status=0
+sans='SANS'
+cn=CN
+kid=KID
+provisioner_password_path=STEP_PROVISIONER_PASSWORD_PATH
+
+function log() {
+    dt=$(date '+%d/%m/%Y %H:%M:%S');
+    echo "[${dt} | ${1}]"
+}
+
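+# Renew the existing certificate in place; the hook executed after renewal depends on the resource type.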
+function renew_cert() {
+  log "Trying to renew certificate ${crt_path}"
+  if [ $resource_type = 'edge' ]; then
+    step ca renew ${crt_path} ${key_path} --exec 'nginx -s reload' --ca-url ${ca_url} --root ${root_crt_path} --force --expires-in 8h
+  elif [ $resource_type = 'endpoint' ]; then
+    step ca renew ${crt_path} ${key_path} --exec "/usr/local/bin/renew_certificates.sh" --ca-url ${ca_url} --root ${root_crt_path} --force --expires-in 8h
+  elif [ $resource_type = 'ssn' ]; then
+    step ca renew ${crt_path} ${key_path} --exec "/usr/local/bin/renew_certificates.sh" --ca-url ${ca_url} --root ${root_crt_path} --force --expires-in 8h && nginx -s reload
+  else
+    log "Wrong resource type. Aborting..."
+    exit 1
+  fi
+}
+
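+# When renewal fails, issue a fresh certificate using a one-time token from the step CA.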
+function recreate_cert() {
+  log "Trying to recreate certificate ${crt_path}"
+  step ca token ${cn} --kid ${kid} --ca-url "${ca_url}" --root ${root_crt_path} --password-file ${provisioner_password_path} ${sans} --output-file /tmp/step_token --force
+  token=$(cat /tmp/step_token)
+  step ca certificate ${cn} ${crt_path} ${key_path} --token "${token}" --kty=RSA --size 2048 --provisioner ${kid} --force
+  if [ $resource_type = 'edge' ]; then
+    nginx -s reload
+  elif [ $resource_type = 'endpoint' ]; then
+    /usr/local/bin/renew_certificates.sh
+  elif [ $resource_type = 'ssn' ]; then
+    /usr/local/bin/renew_certificates.sh
+    nginx -s reload
+  else
+    log "Wrong resource type. Aborting..."
+    exit 1
+  fi
+}
+renew_cert
+if [ $? -eq 0 ]; then
+  log "Certificate ${crt_path} has been renewed or hasn't been expired"
+else
+  renew_status=1
+fi
+
+if [ $renew_status -ne 0 ]; then
+  recreate_cert
+  if [ $? -eq 0 ]; then
+    log "Certificate ${crt_path} has been recreated"
+  else
+    log "Failed to recreate the certificate ${crt_path}"
+  fi
+fi
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/bin/deploy/mongo_files/configure_mongo.py b/infrastructure-provisioning/terraform/bin/deploy/mongo_files/configure_mongo.py
new file mode 100644
index 0000000..14b89b4
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/mongo_files/configure_mongo.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+from pymongo import MongoClient
+import yaml
+import subprocess
+import time
+
+path = "/etc/mongod.conf"
+outfile = "/etc/mongo_params.yml"
+
+def add_2_yml_config(path, section, param, value):
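+    # Merge a single parameter into the given section of a YAML file, creating the section if needed.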
+    try:
+        try:
+            with open(path, 'r') as config_yml_r:
+                config_orig = yaml.safe_load(config_yml_r) or {}
+        except:
+            config_orig = {}
+        sections = []
+        for i in config_orig:
+            sections.append(i)
+        if section in sections:
+            config_orig[section].update({param:value})
+        else:
+            config_orig.update({section:{param:value}})
+        with open(path, 'w') as outfile_yml_w:
+            yaml.dump(config_orig, outfile_yml_w, default_flow_style=False)
+        return True
+    except:
+        print("Could not write the target file")
+        return False
+
+
+def read_yml_conf(path, section, param):
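+    # Read a single value from a YAML config; returns '' when the file or key is missing.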
+    try:
+        with open(path, 'r') as config_yml:
+            config = yaml.safe_load(config_yml)
+        result = config[section][param]
+        return result
+    except:
+        print("File does not exist")
+        return ''
+
+
+if __name__ == "__main__":
+    mongo_passwd = "PASSWORD"
+    mongo_ip = read_yml_conf(path,'net','bindIp')
+    mongo_port = read_yml_conf(path,'net','port')
+    #mongo_parameters = json.loads(args.mongo_parameters)
+    # Setting up admin's password and enabling security
+    client = MongoClient(mongo_ip + ':' + str(mongo_port))
+    pass_upd = True
+    try:
+        command = ['service', 'mongod', 'start']
+        subprocess.call(command, shell=False)
+        time.sleep(5)
+        client.dlabdb.add_user('admin', mongo_passwd, roles=[{'role':'userAdminAnyDatabase','db':'admin'}])
+        client.dlabdb.command('grantRolesToUser', "admin", roles=["readWrite"])
+        # set_mongo_parameters(client, mongo_parameters)
+
+        # client.dlabdb.security.create_index("expireAt", expireAfterSeconds=7200)
+        if add_2_yml_config(path,'security','authorization','enabled'):
+            command = ['service', 'mongod', 'restart']
+            subprocess.call(command, shell=False)
+    except:
+        print("Looks like MongoDB have already been secured")
+        pass_upd = False
+
+    # Generating output config
+    add_2_yml_config(outfile, 'network', 'ip', mongo_ip)
+    add_2_yml_config(outfile, 'network', 'port', mongo_port)
+    add_2_yml_config(outfile, 'account', 'user', 'admin')
+    if pass_upd:
+        add_2_yml_config(outfile, 'account', 'pass', mongo_passwd)
+
diff --git a/infrastructure-provisioning/terraform/bin/deploy/mongo_files/gcp/mongo_roles.json b/infrastructure-provisioning/terraform/bin/deploy/mongo_files/gcp/mongo_roles.json
new file mode 100644
index 0000000..43d12e3
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/mongo_files/gcp/mongo_roles.json
@@ -0,0 +1,268 @@
+[
+  {
+    "_id": "nbShapes_n1-highcpu-2_fetching",
+    "description": "Use n1-highcpu-2 instance shape for notebook",
+    "exploratory_shapes": [
+      "n1-highcpu-2"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbShapes_n1-highcpu-8_fetching",
+    "description": "Use n1-highcpu-8 instance shape for notebook",
+    "exploratory_shapes": [
+      "n1-highcpu-8"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbShapes_n1-highcpu-32_fetching",
+    "description": "Use n1-highcpu-32 instance shape for notebook",
+    "exploratory_shapes": [
+      "n1-highcpu-32"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbShapes_n1-highmem-4_fetching",
+    "description": "Use n1-highmem-4 instance shape for notebook",
+    "exploratory_shapes": [
+      "n1-highmem-4"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbShapes_n1-highmem-16_fetching",
+    "description": "Use n1-highmem-16 instance shape for notebook",
+    "exploratory_shapes": [
+      "n1-highmem-16"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbShapes_n1-highmem-32_fetching",
+    "description": "Use n1-highmem-32 instance shape for notebook",
+    "exploratory_shapes": [
+      "n1-highmem-32"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbShapes_n1-standard-2_fetching",
+    "description": "Use n1-standard-2 instance shape for notebook",
+    "exploratory_shapes": [
+      "n1-standard-2"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateDeeplearning",
+    "description": "Create Notebook Deep Learning",
+    "exploratories": [
+      "docker.dlab-deeplearning"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateJupyter",
+    "description": "Create Notebook Jupyter",
+    "exploratories": [
+      "docker.dlab-jupyter"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateJupyterLab",
+    "description": "Create Notebook JupyterLab",
+    "exploratories": [
+      "docker.dlab-jupyterlab"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateSuperset",
+    "description": "Create Notebook Superset",
+    "exploratories": [
+      "docker.dlab-superset"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateRstudio",
+    "description": "Create Notebook RStudio",
+    "exploratories": [
+      "docker.dlab-rstudio"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateTensor",
+    "description": "Create Notebook Jupyter with TensorFlow",
+    "exploratories": [
+      "docker.dlab-tensor"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateTensorRstudio",
+    "description": "Create Notebook RStudio with TensorFlow",
+    "exploratories": [
+      "docker.dlab-tensor-rstudio"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateZeppelin",
+    "description": "Create Notebook Apache Zeppelin",
+    "exploratories": [
+      "docker.dlab-zeppelin"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateDataEngine",
+    "description": "Create Data Engine",
+    "computationals": [
+      "docker.dlab-dataengine"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateDataEngineService",
+    "description": "Create Data Engine Service",
+    "computationals": [
+      "docker.dlab-dataengine-service"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "compShapes_n1-standard-2_fetching",
+    "description": "Use n1-standard-2 instance shape for cluster",
+    "computational_shapes": [
+      "n1-standard-2"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "compShapes_n1-highmem-4_fetching",
+    "description": "Use n1-highmem-4 instance shape for cluster",
+    "computational_shapes": [
+      "n1-highmem-4"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "compShapes_n1-highmem-16_fetching",
+    "description": "Use n1-highmem-16 instance shape for cluster",
+    "computational_shapes": [
+      "n1-highmem-16"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "compShapes_n1-highmem-32_fetching",
+    "description": "Use n1-highmem-32 instance shape for cluster",
+    "computational_shapes": [
+      "n1-highmem-32"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "compShapes_n1-highcpu-8_fetching",
+    "description": "Use n1-highcpu-8 instance shape for cluster",
+    "computational_shapes": [
+      "n1-highcpu-8"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "compShapes_n1-highcpu-2_fetching",
+    "description": "Use n1-highcpu-2 instance shape for cluster",
+    "computational_shapes": [
+      "n1-highcpu-2"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "compShapes_n1-highcpu-32_fetching",
+    "description": "Use n1-highcpu-32 instance shape for cluster",
+    "computational_shapes": [
+      "n1-highcpu-32"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbBillingReportFull",
+    "description": "View full billing report for all users",
+    "pages": [
+      "/api/infrastructure_provision/billing"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "admin",
+    "description": "Allow to execute administration operation",
+    "pages": [
+      "environment/*",
+      "/api/infrastructure/backup",
+      "/roleManagement",
+      "/api/settings",
+      "/user/settings",
+      "/api/project",
+      "/api/endpoint"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  }
+]
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/bin/deploy/mongo_files/mongod.service_template
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/bin/deploy/mongo_files/mongod.service_template
index 16da950..cddbf66 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/bin/deploy/mongo_files/mongod.service_template
@@ -19,19 +19,16 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+[Unit]
+Description=High-performance, schema-free document-oriented database
+After=network.target
+Documentation=https://docs.mongodb.org/manual
 
+[Service]
+Type=forking
+User=MONGO_USR
+Group=MONGO_USR
+ExecStart=/usr/bin/mongod --quiet --config /etc/mongod.conf
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/bin/deploy/provisioning.yml b/infrastructure-provisioning/terraform/bin/deploy/provisioning.yml
new file mode 100644
index 0000000..abbbadf
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/provisioning.yml
@@ -0,0 +1,197 @@
+# *****************************************************************************
+#
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+#
+# ******************************************************************************
+
+<#assign LOG_ROOT_DIR="/var/opt/dlab/log">
+<#assign KEYS_DIR="/home/OS_USER/keys">
+<#assign KEY_STORE_PATH="${KEYS_DIR}/endpoint.keystore.jks">
+<#assign KEY_STORE_PASSWORD="KEYSTORE_PASSWORD">
+<#assign TRUST_STORE_PATH="JRE_HOME/lib/security/cacerts">
+<#assign TRUST_STORE_PASSWORD="changeit">
+
+# Available options are aws, azure, gcp
+<#assign CLOUD_TYPE="CLOUD_PROVIDER">
+cloudProvider: ${CLOUD_TYPE}
+
+# Switch developer mode on or off here
+<#assign DEV_MODE="false">
+devMode: ${DEV_MODE}
+
+
+mongo:
+  host: MONGO_HOST
+  port: MONGO_PORT
+  username: admin
+  password: MONGO_PASSWORD
+  database: dlabdb
+
+selfService:
+  protocol: https
+  host: SSN_UI_HOST
+  port: 443
+  jerseyClient:
+    timeout: 3s
+    connectionTimeout: 3s
+
+securityService:
+  protocol: https
+  host: localhost
+  port: 8090
+  jerseyClient:
+    timeout: 20s
+    connectionTimeout: 20s
+
+
+provisioningService:
+  protocol: https
+  host: localhost
+  port: 8084
+  jerseyClient:
+    timeout: 3s
+    connectionTimeout: 3s
+
+billingService:
+  jerseyClient:
+    timeout: 4m
+    connectionTimeout: 3s
+
+# Log out user on inactivity
+inactiveUserTimeoutMillSec: 7200000
+
+backupScriptPath: /opt/dlab/tmp/backup.py
+backupDirectory: /opt/dlab/tmp/result
+keyDirectory: ${KEYS_DIR}
+responseDirectory: /opt/dlab/tmp
+handlerDirectory: /opt/dlab/handlers
+dockerLogDirectory: ${LOG_ROOT_DIR}
+warmupPollTimeout: 2m
+resourceStatusPollTimeout: 300m
+keyLoaderPollTimeout: 30m
+requestEnvStatusTimeout: 50s
+adminKey: KEYNAME
+edgeImage: docker.dlab-edge
+fileLengthCheckDelay: 500ms
+
+<#if CLOUD_TYPE == "aws">
+emrEC2RoleDefault: EMR_EC2_DefaultRole
+emrServiceRoleDefault: EMR_DefaultRole
+</#if>
+
+processMaxThreadsPerJvm: 50
+processMaxThreadsPerUser: 5
+processTimeout: 180m
+
+handlersPersistenceEnabled: true
+
+server:
+  requestLog:
+    appenders:
+      - type: file
+        currentLogFilename: ${LOG_ROOT_DIR}/provisioning/request-provisioning.log
+        archive: true
+        archivedLogFilenamePattern: ${LOG_ROOT_DIR}/provisioning/request-provisioning-%d{yyyy-MM-dd}.log.gz
+        archivedFileCount: 10
+  applicationConnectors:
+#    - type: http
+    - type: https
+      port: 8084
+      certAlias: endpoint
+      validateCerts: false
+      keyStorePath: ${KEY_STORE_PATH}
+      keyStorePassword: ${KEY_STORE_PASSWORD}
+      trustStorePath: ${TRUST_STORE_PATH}
+      trustStorePassword: ${TRUST_STORE_PASSWORD}
+  adminConnectors:
+#    - type: http
+    - type: https
+      port: 8085
+      certAlias: endpoint
+      validateCerts: false
+      keyStorePath: ${KEY_STORE_PATH}
+      keyStorePassword: ${KEY_STORE_PASSWORD}
+      trustStorePath: ${TRUST_STORE_PATH}
+      trustStorePassword: ${TRUST_STORE_PASSWORD}
+
+logging:
+  level: INFO
+  loggers:
+    com.epam: TRACE
+    com.aegisql: INFO
+  appenders:
+<#if DEV_MODE == "true">
+    - type: console
+</#if>
+    - type: file
+      currentLogFilename: ${LOG_ROOT_DIR}/provisioning/provisioning.log
+      archive: true
+      archivedLogFilenamePattern: ${LOG_ROOT_DIR}/provisioning/provisioning-%d{yyyy-MM-dd}.log.gz
+      archivedFileCount: 10
+
+keycloakConfiguration:
+  realm: dlab
+  bearer-only: true
+  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
+  ssl-required: none
+  register-node-at-startup: true
+  register-node-period: 600
+  resource: KEYCLOAK_CLIENT_ID
+  credentials:
+    secret: CLIENT_SECRET
+
+cloudProperties:
+  os: CONF_OS
+  serviceBaseName: SERVICE_BASE_NAME
+  edgeInstanceSize: EDGE_INSTANCE_SIZE
+  subnetId: SUBNET_ID
+  region: REGION
+  zone: ZONE
+  confTagResourceId: TAG_RESOURCE_ID
+  securityGroupIds: SG_IDS
+  ssnInstanceSize: SSN_INSTANCE_SIZE
+  notebookVpcId: VPC2_ID
+  notebookSubnetId: SUBNET2_ID
+  confKeyDir: CONF_KEY_DIR
+  vpcId: VPC_ID
+  peeringId: PEERING_ID
+  azureResourceGroupName: AZURE_RESOURCE_GROUP_NAME
+  ssnStorageAccountTagName: AZURE_SSN_STORAGE_ACCOUNT_TAG
+  sharedStorageAccountTagName: AZURE_SHARED_STORAGE_ACCOUNT_TAG
+  datalakeTagName: AZURE_DATALAKE_TAG
+  azureClientId: AZURE_CLIENT_ID
+  gcpProjectId: GCP_PROJECT_ID
+  imageEnabled: CONF_IMAGE_ENABLED
+  azureAuthFile: AZURE_AUTH_FILE_PATH
+  ldap:
+    host: LDAP_HOST
+    dn: LDAP_DN
+    ou: LDAP_OU
+    user: LDAP_USER_NAME
+    password: LDAP_USER_PASSWORD
+  stepCerts:
+    enabled: STEP_CERTS_ENABLED
+    rootCA: STEP_ROOT_CA
+    kid: STEP_KID_ID
+    kidPassword: STEP_KID_PASSWORD
+    caURL: STEP_CA_URL
+  keycloak:
+    auth_server_url: KEYCLOAK_AUTH_SERVER_URL
+    realm_name: KEYCLOAK_REALM_NAME
+    user: KEYCLOAK_USER_NAME
+    user_password: KEYCLOAK_PASSWORD
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/bin/deploy/renew_certificates.sh b/infrastructure-provisioning/terraform/bin/deploy/renew_certificates.sh
new file mode 100644
index 0000000..ff3e46d
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/renew_certificates.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+KEYSTORE_PASS=$(cat /opt/dlab/conf/CONF_FILE.yml  | grep '<#assign KEY_STORE_PASSWORD' | awk -F  '\"' '{print $2}')
+
+# Removing old certificates
+keytool -delete -alias RESOURCE_TYPE -keystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -storepass "${KEYSTORE_PASS}"
+keytool -delete -alias step-ca -keystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -storepass "${KEYSTORE_PASS}"
+keytool -delete -alias step-ca -keystore JAVA_HOME/lib/security/cacerts -storepass changeit
+keytool -delete -alias RESOURCE_TYPE -keystore JAVA_HOME/lib/security/cacerts -storepass changeit
+
+# Importing new certificates to keystore
+openssl pkcs12 -export -in /etc/ssl/certs/dlab.crt -inkey /etc/ssl/certs/dlab.key -name RESOURCE_TYPE -out /home/OS_USER/keys/RESOURCE_TYPE.p12 -password pass:${KEYSTORE_PASS}
+keytool -importkeystore -srckeystore /home/OS_USER/keys/RESOURCE_TYPE.p12 -srcstoretype PKCS12 -alias RESOURCE_TYPE -destkeystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -deststorepass "${KEYSTORE_PASS}" -srcstorepass "${KEYSTORE_PASS}"
+keytool -keystore /home/OS_USER/keys/RESOURCE_TYPE.keystore.jks -alias step-ca -import -file  /etc/ssl/certs/root_ca.crt  -deststorepass "${KEYSTORE_PASS}" -noprompt
+
+
+# Adding new certificates
+keytool -importcert -trustcacerts -alias RESOURCE_TYPE -file /etc/ssl/certs/dlab.crt -noprompt -storepass changeit -keystore JAVA_HOME/lib/security/cacerts
+keytool -importcert -trustcacerts -alias step-ca -file /etc/ssl/certs/root_ca.crt -noprompt -storepass changeit -keystore JAVA_HOME/lib/security/cacerts
+
+# Restarting service
+supervisorctl restart all
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/bin/deploy/step-cert-manager.service
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/bin/deploy/step-cert-manager.service
index 16da950..994eea7 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/bin/deploy/step-cert-manager.service
@@ -19,19 +19,14 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+[Unit]
+Description=Check Step certificates
+After=network.target
 
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/manage_step_certs.sh
+TimeoutStartSec=0
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+[Install]
+WantedBy=default.target
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/bin/deploy/supervisor_svc.conf b/infrastructure-provisioning/terraform/bin/deploy/supervisor_svc.conf
new file mode 100644
index 0000000..b170043
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/supervisor_svc.conf
@@ -0,0 +1,45 @@
+; *****************************************************************************
+;
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements.  See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership.  The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License.  You may obtain a copy of the License at
+;
+;   http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied.  See the License for the
+; specific language governing permissions and limitations
+; under the License.
+;
+; ******************************************************************************
+
+[supervisorctl]
+
+[inet_http_server]
+port = 127.0.0.1:9001
+
+[program:provserv]
+command=java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 WEB_APP_DIR/provisioning-service.jar server WEB_CONFprovisioning.yml
+directory=WEB_APP_DIR
+autorestart=true
+priority=20
+user=root
+stdout_logfile=/var/log/application/provision-service.log
+redirect_stderr=true
+environment=DLAB_CONF_DIR="WEB_CONF"
+
+[program:billing]
+command=java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 WEB_APP_DIR/billing.jar CONF_PARAMETERWEB_CONFbilling.yml
+directory=WEB_APP_DIR
+autorestart=true
+priority=20
+user=root
+stdout_logfile=/var/log/application/billing.log
+redirect_stderr=true
+environment=DLAB_CONF_DIR="WEB_CONF"
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/bin/dlab.py b/infrastructure-provisioning/terraform/bin/dlab.py
new file mode 100644
index 0000000..68b8739
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/dlab.py
@@ -0,0 +1,1362 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import argparse
+import itertools
+import json
+import logging
+import os
+import re
+import shutil
+import subprocess
+import sys
+import time
+from abc import abstractmethod
+from deploy.endpoint_fab import start_deploy
+from fabric import Connection
+from patchwork.transfers import rsync
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+logging.basicConfig(level=logging.INFO, format='%(levelname)s-%(message)s')
+INITIAL_LOCATION = os.path.dirname(os.path.abspath(__file__))
+
+
+class TerraformOutputBase:
+    @property
+    @abstractmethod
+    def output_path(self):
+        pass
+
+    @abstractmethod
+    def write(self, obj):
+        pass
+
+    @abstractmethod
+    def extract(self):
+        pass
+
+
+class LocalStorageOutputProcessor(TerraformOutputBase):
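+    # Persists terraform output as a JSON document on the local filesystem.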
+    output_path = None
+
+    def __init__(self, path):
+        self.output_path = path
+
+    def write(self, obj):
+        """Write json string to local file
+        :param obj: json string
+        """
+        existed_data = {}
+        if os.path.isfile(self.output_path):
+            with open(self.output_path, 'r') as fp:
+                output = fp.read()
+                if len(output):
+                    existed_data = json.loads(output)
+        existed_data.update(obj)
+
+        with open(self.output_path, 'w') as fp:
+            json.dump(existed_data, fp)
+
+    def extract(self):
+        """Extract data from local file
+        :return: dict
+        """
+        if os.path.isfile(self.output_path):
+            with open(self.output_path, 'r') as fp:
+                output = fp.read()
+                if len(output):
+                    return json.loads(output)
+
+
+def extract_args(cli_args):
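+    # Drop empty values and render list values as Terraform-style quoted arrays.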
+    args = []
+    for key, value in cli_args.items():
+        if not value:
+            continue
+        if type(value) == list:
+            quoted_list = ['"{}"'.format(item) for item in value]
+            joined_values = ', '.join(quoted_list)
+            value = '[{}]'.format(joined_values)
+        args.append((key, value))
+    return args
+
+
+def get_var_args_string(cli_args):
+    """Convert dict of cli argument into string
+
+    Args:
+        cli_args: dict of cli arguments
+    Returns:
+        str: string of joined key=values
+    """
+    args = extract_args(cli_args)
+    args = ["-var '{0}={1}'".format(key, value) for key, value in args]
+    return ' '.join(args)
+
+
+def get_args_string(cli_args):
+    """Convert dict of cli argument into string
+
+    Args:
+        cli_args: dict of cli arguments
+    Returns:
+        str: string of joined key=values
+    """
+
+    args = extract_args(cli_args)
+    args = ["{0} {1}".format(key, value) for key, value in args]
+    return ' '.join(args)
+
+
+class ParamsBuilder:
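+    # Fluent builder that accumulates argparse parameter definitions,
+    # each tagged with one or more groups.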
+
+    def __init__(self):
+        self.__params = []
+
+    def add(self, arg_type, name, desc, **kwargs):
+        default_group = ['all_args']
+        if isinstance(kwargs.get('group'), str):
+            default_group.append(kwargs.get('group'))
+        if isinstance(kwargs.get('group'), (list, tuple)):
+            default_group.extend(kwargs.get('group'))
+
+        parameter = {
+            'group': default_group,
+            'name': name,
+            'props': {
+                'help': desc,
+                'type': arg_type,
+                'default': kwargs.get('default'),
+                'choices': kwargs.get('choices'),
+                'nargs': kwargs.get('nargs'),
+                'action': kwargs.get('action'),
+                'required': kwargs.get('required'),
+            }
+        }
+        self.__params.append(parameter)
+        return self
+
+    def add_str(self, name, desc, **kwargs):
+        return self.add(str, name, desc, **kwargs)
+
+    def add_bool(self, name, desc, **kwargs):
+        return self.add(self.str2bool, name, desc, **kwargs)
+
+    def add_int(self, name, desc, **kwargs):
+        return self.add(int, name, desc, **kwargs)
+
+    @staticmethod
+    def str2bool(v):
+        if isinstance(v, bool):
+            return v
+        if v.lower() in ('yes', 'true', 't', 'y', '1'):
+            return True
+        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+            return False
+        else:
+            raise argparse.ArgumentTypeError('Boolean value expected.')
+
+    def build(self):
+        return self.__params
+
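+# Illustrative ParamsBuilder usage (hypothetical flags): add_* calls chain,
+# and build() returns the accumulated parameter specs consumed by parse_args().
+#   spec = (ParamsBuilder()
+#           .add_str('--region', 'Region name', default='us-west-2', group='k8s')
+#           .add_bool('--no_color', 'no color console_output', group='service')
+#           .build())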
+
+class Console:
+
+    @staticmethod
+    def execute_to_command_line(command):
+        """ Execute cli command
+
+        Args:
+            command: str cli command
+        Returns:
+            str: command result
+        """
+        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
+                                   stderr=subprocess.STDOUT,
+                                   universal_newlines=True)
+
+        while True:
+            nextline = process.stdout.readline()
+            print(nextline, end='')  # readline() keeps the trailing newline
+            if nextline == '' and process.poll() is not None:
+                break
+            if 'error' in nextline.lower():
+                sys.exit(1)
+
+    @staticmethod
+    def execute(command):
+        """ Execute cli command
+
+        Args:
+            command: str cli command
+        Returns:
+            str: command result
+        """
+        return os.popen(command).read()
+
+    @staticmethod
+    def ssh(ip, name, pkey):
+        attempt = 0
+        while attempt < 12:
+            logging.info('connection attempt {}'.format(attempt))
+            connection = Connection(
+                host=ip,
+                user=name,
+                connect_kwargs={'key_filename': pkey,
+                                'allow_agent': False,
+                                'look_for_keys': False,
+                                })
+            try:
+                connection.run('ls')
+                return connection
+            except Exception as ex:
+                logging.error(ex)
+                attempt += 1
+                time.sleep(10)
+        raise TerraformProviderError(
+            'failed to establish SSH connection to {}'.format(ip))
+
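+# Illustrative Console.ssh usage (hypothetical host and key): fabric connects
+# lazily, so the 'ls' probe above is what forces a real connection attempt.
+#   with Console.ssh('10.0.0.5', 'dlab-user', '/path/to/key.pem') as conn:
+#       conn.run('whoami')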
+
+class TerraformProviderError(Exception):
+    """
+    Raised on errors during Terraform provisioning
+    """
+    pass
+
+
+class TerraformProvider:
+
+    def __init__(self, no_color=False):
+        self.no_color = '-no-color' if no_color else ''
+
+    def initialize(self):
+        """Initialize terraform
+
+        Returns:
+             bool: init successful
+        Raises:
+            TerraformProviderError: if initialization did not succeed
+        """
+        logging.info('terraform init')
+        terraform_success_init = 'Terraform has been successfully initialized!'
+        command = 'terraform init {}'.format(self.no_color)
+        terraform_init_result = Console.execute(command)
+        logging.info(terraform_init_result)
+        if terraform_success_init not in terraform_init_result:
+            raise TerraformProviderError(terraform_init_result)
+
+    def validate(self):
+        """Validate terraform
+
+        Returns:
+             bool: validation successful
+        Raises:
+            TerraformProviderError: if validation did not succeed
+
+        """
+        logging.info('terraform validate')
+        terraform_success_validate = 'Success!'
+        terraform_validate_result = Console.execute(
+            'terraform validate {}'.format(self.no_color))
+        logging.info(terraform_validate_result)
+        if terraform_success_validate not in terraform_validate_result:
+            raise TerraformProviderError(terraform_validate_result)
+
+    def apply(self, tf_params, cli_args):
+        """Run terraform
+
+        Args:
+            tf_params: dict of terraform parameters
+            cli_args: dict of parameters
+        Returns:
+             None
+        """
+        logging.info('terraform apply')
+
+        args_str = get_var_args_string(cli_args)
+        params_str = get_args_string(tf_params)
+        command = ('terraform apply -auto-approve {} {} {}'
+                   .format(self.no_color, params_str, args_str))
+        logging.info(command)
+        Console.execute_to_command_line(command)
+
+    def destroy(self, tf_params, cli_args, keep_state_file=False):
+        """Destroy terraform
+
+        Args:
+            tf_params: dict of terraform parameters
+            cli_args: dict of parameters
+            keep_state_file: Boolean
+        Returns:
+             None
+        """
+        logging.info('terraform destroy')
+        args_str = get_var_args_string(cli_args)
+        params_str = get_args_string(tf_params)
+        command = ('terraform destroy -auto-approve {} {} {}'
+                   .format(self.no_color, params_str, args_str))
+        logging.info(command)
+        Console.execute_to_command_line(command)
+        if not keep_state_file:
+            state_file = tf_params['-state']
+            state_file_backup = tf_params['-state'] + '.backup'
+            if os.path.isfile(state_file):
+                os.remove(state_file)
+            if os.path.isfile(state_file_backup):
+                os.remove(state_file_backup)
+
+    @staticmethod
+    def output(tf_params, *args):
+        """Get terraform output
+
+        Args:
+            tf_params: dict of terraform parameters
+            *args: list of str parameters
+        Returns:
+            str: terraform output result
+        """
+        params = get_args_string(tf_params)
+        return Console.execute('terraform output {} {}'
+                               .format(params, ' '.join(args)))
+
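+# Illustrative output() call (hypothetical state file): the raw JSON string
+# is returned, so callers are expected to json.loads() it.
+#   raw = TerraformProvider.output({'-state': '/tmp/ssn-k8s.tfstate'}, '-json')
+#   values = json.loads(raw)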
+
+class AbstractDeployBuilder:
+    def __init__(self):
+
+        args = self.parse_args()
+        self.service_args = args.get('service')
+        self.no_color = self.service_args.get('no_color')
+        state_dir = self.service_args.get('state')
+        if not state_dir:
+            self.output_dir = None
+            self.tf_output = os.path.join(INITIAL_LOCATION, 'output.json')
+            self.tf_params = {}
+        else:
+            if os.path.isdir(state_dir) and os.access(state_dir, os.W_OK):
+                service_name = (args.get(self.terraform_args_group_name)
+                                .get('service_base_name'))
+                self.output_dir = os.path.join(state_dir, service_name)
+                self.tf_output = os.path.join(self.output_dir, 'output.json')
+                self.tf_params = {
+                    '-state': os.path.join(
+                        self.output_dir, '{}.tfstate'.format(self.name))
+                }
+            else:
+                sys.stderr.write('state directory does not exist '
+                                 'or is not writable')
+                sys.exit(1)
+        if self.use_tf_output_file:
+            self.fill_sys_argv_from_file()
+        self.terraform_args = self.parse_args().get(
+            self.terraform_args_group_name)
+
+    @property
+    @abstractmethod
+    def terraform_location(self):
+        """ get Terraform location
+
+        Returns:
+            str: TF script location
+        """
+        raise NotImplementedError
+
+    @property
+    @abstractmethod
+    def name(self):
+        """ get Terraform name
+
+        Returns:
+            str: TF name
+        """
+        raise NotImplementedError
+
+    @property
+    @abstractmethod
+    def terraform_args_group_name(self):
+        """ get Terraform location
+
+        Returns:
+            str: TF script location
+        """
+        raise NotImplementedError
+
+    @property
+    @abstractmethod
+    def cli_args(self):
+        """Get cli arguments
+
+        Returns:
+            dict: dictionary of CLI arguments
+                  with name as key and props as value
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def deploy(self):
+        """Post terraform execution
+
+        Returns:
+            None
+        """
+        raise NotImplementedError
+
+    @property
+    def use_tf_output_file(self):
+        return False
+
+    def apply(self):
+        """Apply terraform"""
+        terraform = TerraformProvider(self.no_color)
+        terraform.apply(self.tf_params, self.terraform_args)
+
+    def destroy(self):
+        """Destory terraform"""
+        terraform = TerraformProvider(self.no_color)
+        terraform.destroy(self.tf_params, self.terraform_args)
+
+    def store_output_to_file(self):
+        """Extract terraform output and store to file"""
+        terraform = TerraformProvider(self.no_color)
+        output = terraform.output(self.tf_params, '-json')
+        output = {key: value.get('value')
+                  for key, value in json.loads(output).items()}
+        output_writer = LocalStorageOutputProcessor(self.tf_output)
+        output_writer.write(output)
+
+    def update_extracted_file_data(self, obj):
+        """
+        :param obj:
+        :return:
+        Override method if you need to modify extracted from file data
+        """
+        pass
+
+    def fill_sys_argv_from_file(self):
+        """Extract data from file and fill sys args"""
+        output_processor = LocalStorageOutputProcessor(self.tf_output)
+        output = output_processor.extract()
+        if output:
+            self.update_extracted_file_data(output)
+            for key, value in output.items():
+                key = '--' + key
+                if key not in sys.argv:
+                    sys.argv.extend([key, value])
+                else:
+                    try:
+                        index = sys.argv.index(key)
+                        sys.argv[index + 1] = value
+                    except (ValueError, IndexError):
+                        pass
+
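+    # Illustrative effect (hypothetical values): an extracted entry
+    # {'vpc_id': 'vpc-123'} either appends '--vpc_id vpc-123' to sys.argv or
+    # overwrites the value that already follows an existing '--vpc_id' flag.
+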
+    def parse_args(self):
+        """Get dict of arguments
+
+        Returns:
+            dict: CLI arguments
+        """
+        parsers = {}
+        args = []
+
+        for arg in self.cli_args:
+            group = arg.get('group')
+            if isinstance(group, (list, tuple)):
+                for item in group:
+                    args.append(dict(arg.copy(), **{'group': item}))
+            else:
+                args.append(arg)
+
+        cli_args = sorted(args, key=lambda x: x.get('group'))
+        args_groups = itertools.groupby(cli_args, lambda x: x.get('group'))
+        for group, args in args_groups:
+            parser = argparse.ArgumentParser()
+            for arg in args:
+                parser.add_argument(arg.get('name'), **arg.get('props'))
+            parsers[group] = parser
+        return {
+            group: vars(parser.parse_known_args()[0])
+            for group, parser in parsers.items()
+        }
+
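+    # Illustrative grouping (argument specs come from cli_args): a parameter
+    # declared with group=('k8s', 'helm_charts') is registered in both the
+    # 'k8s' and 'helm_charts' parsers; parse_known_args() lets each parser
+    # ignore flags that belong to other groups.
+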
+    def validate_params(self):
+        params = self.parse_args()[self.terraform_args_group_name]
+        if len(params.get('service_base_name')) > 20:
+            sys.stderr.write('service_base_name length should not exceed 20')
+            sys.exit(1)
+        if not re.match("^[a-z0-9\-]+$", params.get('service_base_name')):
+            sys.stderr.write('service_base_name should contain only lowercase '
+                             'alphanumetic characters and hyphens')
+            sys.exit(1)
+
+    def provision(self):
+        """Execute terraform script
+
+        Returns:
+            None
+        Raises:
+            TerraformProviderError: if init or validate fails
+        """
+        self.validate_params()
+        tf_location = self.terraform_location
+        terraform = TerraformProvider(self.no_color)
+        os.chdir(tf_location)
+        try:
+            terraform.initialize()
+            terraform.validate()
+        except TerraformProviderError as ex:
+            raise Exception('Error while provisioning {}'.format(ex))
+
+
+class AWSK8sSourceBuilder(AbstractDeployBuilder):
+
+    def __init__(self):
+        super(AWSK8sSourceBuilder, self).__init__()
+        self._args = self.parse_args()
+        self._ip = None
+        self._user_name = self.args.get(self.terraform_args_group_name).get(
+            'os_user')
+        self._pkey_path = self.args.get('service').get('pkey')
+
+    @property
+    def name(self):
+        return 'ssn-k8s'
+
+    @property
+    def args(self):
+        return self._args
+
+    @property
+    def ip(self):
+        return self._ip
+
+    @ip.setter
+    def ip(self, ip):
+        self._ip = ip
+
+    @property
+    def user_name(self):
+        return self._user_name
+
+    @property
+    def pkey_path(self):
+        return self._pkey_path
+
+    @property
+    def terraform_location(self):
+        tf_dir = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
+        return os.path.join(tf_dir, 'aws/ssn-k8s/main')
+
+    @property
+    def terraform_args_group_name(self):
+        return 'k8s'
+
+    def validate_params(self):
+        super(AWSK8sSourceBuilder, self).validate_params()
+        params = self.parse_args()['all_args']
+        if params.get('ssn_k8s_masters_count', 1) < 1:
+            sys.stderr.write('ssn_k8s_masters_count should be greater than 0')
+            sys.exit(1)
+        if params.get('ssn_k8s_workers_count', 3) < 3:
+            sys.stderr.write('ssn_k8s_workers_count should be at least 3')
+            sys.exit(1)
+        # Temporary condition for Jenkins job
+        if 'endpoint_id' in params and len(params.get('endpoint_id')) > 12:
+            sys.stderr.write('endpoint_id length should not exceed 12')
+            sys.exit(1)
+
+    @property
+    def cli_args(self):
+        params = ParamsBuilder()
+        (params
+         .add_bool('--no_color', 'no color console_output', group='service',
+                   default=False)
+         .add_str('--state', 'State file path', group='service')
+         .add_str('--access_key_id', 'AWS Access Key ID', required=True,
+                  group='k8s')
+         .add_str('--allowed_cidrs',
+                  'CIDR to allow access to SSN K8S cluster.',
+                  default=["0.0.0.0/0"], action='append', group='k8s')
+         .add_str('--ami', 'ID of EC2 AMI.', required=True, group='k8s')
+         .add_str('--env_os', 'OS type.', default='debian',
+                  choices=['debian', 'redhat'], group='k8s')
+         .add_str('--key_name', 'Name of EC2 Key pair.', required=True,
+                  group='k8s')
+         .add_str('--os_user', 'Name of DLab service user.',
+                  default='dlab-user', group='k8s')
+         .add_str('--pkey', 'path to key', required=True, group='service')
+         .add_str('--region', 'Name of AWS region.', default='us-west-2',
+                  group='k8s')
+         .add_str('--secret_access_key', 'AWS Secret Access Key',
+                  required=True,
+                  group='k8s')
+         .add_str('--service_base_name',
+                  'Any infrastructure value (should be unique if '
+                  'multiple SSNs have been deployed before).',
+                  default='k8s', group=('k8s', 'helm_charts'))
+         .add_int('--ssn_k8s_masters_count', 'Count of K8S masters.',
+                  default=3,
+                  group='k8s')
+         .add_int('--ssn_k8s_workers_count', 'Count of K8S workers', default=3,
+                  group=('k8s', 'helm_charts'))
+         .add_str('--ssn_k8s_masters_shape', 'Shape for SSN K8S masters.',
+                  default='t2.medium', group='k8s')
+         .add_str('--ssn_k8s_workers_shape', 'Shape for SSN K8S workers.',
+                  default='t2.medium', group='k8s')
+         .add_int('--ssn_root_volume_size', 'Size of root volume in GB.',
+                  default=30, group='k8s')
+         .add_str('--subnet_cidr_a',
+                  'CIDR for Subnet creation in zone a. Conflicts with subnet_id_a.',
+                  default='172.31.0.0/24', group='k8s')
+         .add_str('--subnet_cidr_b',
+                  'CIDR for Subnet creation in zone b. Conflicts with subnet_id_b.',
+                  default='172.31.1.0/24', group='k8s')
+         .add_str('--subnet_cidr_c',
+                  'CIDR for Subnet creation in zone c. Conflicts with subnet_id_c.',
+                  default='172.31.2.0/24', group='k8s')
+         .add_str('--subnet_id_a',
+                  'ID of AWS Subnet in zone a if you already have subnet created.',
+                  group='k8s')
+         .add_str('--subnet_id_b',
+                  'ID of AWS Subnet in zone b if you already have subnet created.',
+                  group='k8s')
+         .add_str('--subnet_id_c',
+                  'ID of AWS Subnet in zone c if you already have subnet created.',
+                  group='k8s')
+         .add_str('--vpc_cidr', 'CIDR for VPC creation. Conflicts with vpc_id',
+                  default='172.31.0.0/16', group='k8s')
+         .add_str('--vpc_id', 'ID of AWS VPC if you already have VPC created.',
+                  group='k8s')
+         .add_str('--zone', 'Name of AWS zone', default='a',
+                  group='k8s')
+         .add_str('--ldap_host', 'ldap host', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_dn', 'ldap dn', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_user', 'ldap user', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_bind_creds', 'ldap bind creds', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_users_group', 'ldap users group', required=True,
+                  group='helm_charts')
+         .add_str('--tag_resource_id', 'Tag resource ID.',
+                  default='user:tag', group=('k8s', 'helm_charts'))
+         .add_str('--additional_tag', 'Additional tag.',
+                  default='product:dlab', group='k8s')
+         .add_str('--billing_bucket', 'Billing bucket name',
+                  group='helm_charts')
+         .add_str('--billing_bucket_path',
+                  'The path to billing reports directory in S3 bucket',
+                  default='',
+                  group='helm_charts')
+         .add_str('--billing_aws_job_enabled',
+                  'Billing format. Available options: true (aws), false (epam)',
+                  default='false',
+                  group='helm_charts')
+         .add_str('--billing_aws_account_id',
+                  'The ID of Amazon account', default='',
+                  group='helm_charts')
+         .add_str('--billing_dlab_id',
+                  'Column name in report file that contains dlab id tag',
+                  default='resource_tags_user_user_tag',
+                  group='helm_charts')
+         .add_str('--billing_usage_date',
+                  'Column name in report file that contains usage date tag',
+                  default='line_item_usage_start_date',
+                  group='helm_charts')
+         .add_str('--billing_product',
+                  'Column name in report file that contains product name tag',
+                  default='product_product_name',
+                  group='helm_charts')
+         .add_str('--billing_usage_type',
+                  'Column name in report file that contains usage type tag',
+                  default='line_item_usage_type',
+                  group='helm_charts')
+         .add_str('--billing_usage',
+                  'Column name in report file that contains usage tag',
+                  default='line_item_usage_amount',
+                  group='helm_charts')
+         .add_str('--billing_cost',
+                  'Column name in report file that contains cost tag',
+                  default='line_item_blended_cost',
+                  group='helm_charts')
+         .add_str('--billing_resource_id',
+                  'Column name in report file that contains dlab resource id tag',
+                  default='line_item_resource_id',
+                  group='helm_charts')
+         .add_str('--billing_tags',
+                  'Column name in report file that contains tags',
+                  default='line_item_operation,line_item_line_item_description',
+                  group='helm_charts')
+         .add_str('--billing_tag', 'Billing tag', default='dlab',
+                  group='helm_charts')
+         .add_bool('--custom_certs_enabled', 'Enable custom certificates',
+                   default=False, group=('service', 'helm_charts'))
+         .add_str('--custom_cert_path', 'custom_cert_path', default='', group=('service', 'helm_charts'))
+         .add_str('--custom_key_path', 'custom_key_path', default='', group=('service', 'helm_charts'))
+         .add_str('--custom_certs_host', 'custom certs host', default='', group='helm_charts')
+         # Tmp for jenkins job
+         .add_str('--endpoint_id', 'Endpoint Id',
+                  default='user:tag', group=())
+         )
+        return params.build()
+
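+    # Illustrative invocation wiring the flags above together (values are
+    # placeholders, not working credentials):
+    #   ./dlab deploy aws k8s --access_key_id AKIA... --secret_access_key ... \
+    #       --ami ami-0123456789 --key_name my-key --pkey /path/to/key.pem \
+    #       --ldap_host ldap.example.com --ldap_dn dc=example,dc=com \
+    #       --ldap_user cn=admin --ldap_bind_creds secret \
+    #       --ldap_users_group ou=People
+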
+    def check_k8s_cluster_status(self):
+        """ Check for kubernetes status
+
+        Returns:
+            None
+        Raises:
+            TerraformProviderError: if master or kubeDNS is not running
+
+        """
+        start_time = time.time()
+        while True:
+            with Console.ssh(self.ip, self.user_name, self.pkey_path) as c:
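+                # strip ANSI escape sequences so the status substrings below
+                # match kubectl's plain-text output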
+                k8c_info_status = c.run(
+                    'kubectl cluster-info | '
+                    'sed -r "s/\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]//g"') \
+                    .stdout
+
+            kubernetes_success_status = 'Kubernetes master is running'
+            kubernetes_dns_success_status = 'KubeDNS is running'
+
+            kubernetes_succeed = kubernetes_success_status in k8c_info_status
+            kube_dns_succeed = kubernetes_dns_success_status in k8c_info_status
+
+            if kubernetes_succeed and kube_dns_succeed:
+                break
+            if (time.time() - start_time) >= 600:
+                raise TimeoutError
+            time.sleep(60)
+
+    def check_tiller_status(self):
+        """ Check tiller status
+
+        Returns:
+            None
+        Raises:
+            TerraformProviderError: if tiller is not running
+
+        """
+        start_time = time.time()
+
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as c:
+            while True:
+                tiller_status = c.run(
+                    "kubectl get pods --all-namespaces "
+                    "| grep tiller | awk '{print $4}'").stdout
+                tiller_success_status = 'Running'
+                if tiller_success_status in tiller_status:
+                    break
+                if (time.time() - start_time) >= 1200:
+                    raise TimeoutError
+                time.sleep(60)
+
+    def select_master_ip(self):
+        terraform = TerraformProvider(self.no_color)
+        output = terraform.output(self.tf_params,
+                                  '-json ssn_k8s_masters_ip_addresses')
+        ips = json.loads(output)
+        if not ips:
+            raise TerraformProviderError('no ips')
+        self.ip = ips[0]
+
+    def copy_terraform_to_remote(self):
+        logging.info('transfer terraform dir to remote')
+        tf_dir = os.path.abspath(
+            os.path.join(os.getcwd(), os.path.pardir, os.path.pardir))
+        source = os.path.join(tf_dir, 'ssn-helm-charts')
+        remote_dir = '/home/{}/terraform/'.format(self.user_name)
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as conn:
+            conn.run('mkdir -p {}'.format(remote_dir))
+            rsync(conn, source, remote_dir, strict_host_keys=False)
+
+    def copy_cert(self):
+        logging.info('transfer certificates to remote')
+        cert_path = self.service_args.get('custom_cert_path')
+        key_path = self.service_args.get('custom_key_path')
+        remote_dir = '/tmp/'
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as conn:
+            conn.run('mkdir -p {}'.format(remote_dir))
+            rsync(conn, cert_path, remote_dir, strict_host_keys=False)
+            rsync(conn, key_path, remote_dir, strict_host_keys=False)
+
+    def run_remote_terraform(self):
+        logging.info('apply helm charts')
+        args = self.parse_args()
+        nlb_dns_name = json.loads(TerraformProvider(self.no_color)
+                                  .output(self.tf_params,
+                                          '-json ssn_k8s_nlb_dns_name'))
+        logging.info('apply ssn-helm-charts')
+        terraform_args = args.get('helm_charts')
+        args_str = get_var_args_string(terraform_args)
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as conn:
+            with conn.cd('terraform/ssn-helm-charts/main'):
+                init = conn.run('terraform init').stdout.lower()
+                validate = conn.run('terraform validate').stdout.lower()
+                if 'success' not in init or 'success' not in validate:
+                    raise TerraformProviderError(
+                        'remote terraform init/validate failed')
+                command = ('terraform apply -auto-approve {} '
+                           '-var \'ssn_k8s_nlb_dns_name={}\''
+                           .format(args_str, nlb_dns_name))
+                logging.info(command)
+                conn.run(command)
+                output = ' '.join(conn.run('terraform output -json')
+                                  .stdout.split())
+                self.fill_args_from_dict(json.loads(output))
+
+    def output_terraform_result(self):
+        ssn_k8s_sg_id = json.loads(
+            TerraformProvider(self.no_color).output(self.tf_params,
+                                                    '-json ssn_k8s_sg_id'))
+        ssn_subnet = json.loads(
+            TerraformProvider(self.no_color).output(self.tf_params,
+                                                    '-json ssn_subnet_id'))
+        ssn_vpc_id = json.loads(
+            TerraformProvider(self.no_color).output(self.tf_params,
+                                                    '-json ssn_vpc_id'))
+
+        logging.info("""
+        DLab SSN K8S cluster has been deployed successfully!
+        Summary:
+        VPC ID: {}
+        Subnet ID: {}
+        SG IDs: {}
+        """.format(ssn_vpc_id, ssn_subnet, ssn_k8s_sg_id))
+
+    def fill_args_from_dict(self, output):
+        for key, value in output.items():
+            value = value.get('value')
+            sys.argv.extend(['--' + key, value])
+
+    def fill_remote_terraform_output(self):
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as conn:
+            with conn.cd('terraform/ssn-helm-charts/main'):
+                output = ' '.join(conn.run('terraform output -json')
+                                  .stdout.split())
+                self.fill_args_from_dict(json.loads(output))
+                output_processor = LocalStorageOutputProcessor(self.tf_output)
+                output = {key: value.get('value')
+                          for key, value in json.loads(output).items()}
+                output_processor.write(output)
+
+    @staticmethod
+    def add_ip_to_known_hosts(ip):
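+        # 'ssh-keygen -H -F <ip>' prints matching known_hosts entries; empty
+        # output means the host is still unknown, so scan it and re-check.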
+        attempt = 0
+        while attempt < 10:
+            if len(Console.execute('ssh-keygen -H -F {}'.format(ip))) == 0:
+                Console.execute(
+                    'ssh-keyscan {} >> ~/.ssh/known_hosts'.format(ip))
+                attempt += 1
+            else:
+                break
+
+    def destroy_remote_terraform(self):
+        logging.info('destroy helm charts')
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as conn:
+            with conn.cd('terraform/ssn-helm-charts/main'):
+                init = conn.run('terraform init').stdout.lower()
+                validate = conn.run('terraform validate').stdout.lower()
+                if 'success' not in init or 'success' not in validate:
+                    raise TerraformProviderError(
+                        'remote terraform init/validate failed')
+                command = 'terraform destroy -auto-approve'
+                logging.info(command)
+                conn.run(command)
+
+    def deploy(self):
+        logging.info('deploy')
+        output = ' '.join(
+            TerraformProvider(self.no_color).output(self.tf_params,
+                                                    '-json').split())
+        self.fill_args_from_dict(json.loads(output))
+        self.select_master_ip()
+        self.add_ip_to_known_hosts(self.ip)
+        self.check_k8s_cluster_status()
+        self.check_tiller_status()
+        self.copy_terraform_to_remote()
+        if self.service_args.get('custom_certs_enabled'):
+            self.copy_cert()
+        self.run_remote_terraform()
+        self.fill_remote_terraform_output()
+        self.output_terraform_result()
+
+    def destroy(self):
+        self.select_master_ip()
+        try:
+            self.destroy_remote_terraform()
+        except Exception as ex:
+            logging.error('Error while destroying helm charts: {}'.format(ex))
+        super(AWSK8sSourceBuilder, self).destroy()
+        if self.output_dir is not None:
+            shutil.rmtree(self.output_dir)
+        elif os.path.isfile(os.path.join(INITIAL_LOCATION, 'output.json')):
+            os.remove(os.path.join(INITIAL_LOCATION, 'output.json'))
+
+
+class AWSEndpointBuilder(AbstractDeployBuilder):
+
+    def update_extracted_file_data(self, obj):
+        if 'ssn_vpc_id' in obj:
+            obj['vpc_id'] = obj['ssn_vpc_id']
+        if 'ssn_subnet_id' in obj:
+            obj['subnet_id'] = obj['ssn_subnet_id']
+
+    @property
+    def name(self):
+        return 'endpoint'
+
+    @property
+    def use_tf_output_file(self):
+        return True
+
+    @property
+    def terraform_location(self):
+        tf_dir = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
+        return os.path.join(tf_dir, 'aws/endpoint/main')
+
+    @property
+    def terraform_args_group_name(self):
+        return 'endpoint'
+
+    def validate_params(self):
+        super(AWSEndpointBuilder, self).validate_params()
+        params = self.parse_args()[self.terraform_args_group_name]
+        if len(params.get('endpoint_id')) > 12:
+            sys.stderr.write('endpoint_id length should not exceed 12')
+            sys.exit(1)
+
+    @property
+    def cli_args(self):
+        params = ParamsBuilder()
+        (params
+         .add_bool('--no_color', 'no color console_output', group='service',
+                   default=False)
+         .add_str('--state', 'State file path', group='service')
+         .add_str('--secret_access_key', 'AWS Secret Access Key',
+                  required=True,
+                  group='endpoint')
+         .add_str('--access_key_id', 'AWS Access Key ID', required=True,
+                  group='endpoint')
+         .add_str('--pkey', 'path to key', required=True, group='service')
+         .add_str('--service_base_name',
+                  'Any infrastructure value (should be unique if multiple '
+                  'SSNs have been deployed before). Should be the same as on SSN',
+                  group='endpoint')
+         .add_str('--vpc_id', 'ID of AWS VPC if you already have VPC created.',
+                  group='endpoint')
+         .add_str('--vpc_cidr',
+                  'CIDR for VPC creation. Conflicts with vpc_id.',
+                  default='172.31.0.0/16', group='endpoint')
+         .add_str('--subnet_id',
+                  'ID of Subnet if you already have subnet created.',
+                  group='endpoint')
+         .add_str('--ssn_k8s_sg_id', 'ID of SSN SG.', group='endpoint')
+         .add_str('--subnet_cidr',
+                  'CIDR for Subnet creation. Conflicts with subnet_id.',
+                  default='172.31.0.0/24', group='endpoint')
+         .add_str('--ami', 'ID of AMI.', group='endpoint')
+         .add_str('--key_name', 'Name of EC2 Key pair.', required=True,
+                  group='endpoint')
+         .add_str('--endpoint_id', 'Endpoint id.', required=True,
+                  group='endpoint')
+         .add_str('--region', 'Name of AWS region.', default='us-west-2',
+                  group='endpoint')
+         .add_str('--zone', 'Name of AWS zone.', default='a', group='endpoint')
+         .add_str('--network_type',
+                  'Type of network to create for the endpoint (if the '
+                  'network does not exist and requires creation)',
+                  default='public', group='endpoint')
+         .add_str('--endpoint_instance_shape', 'Instance shape of Endpoint.',
+                  default='t2.medium', group='endpoint')
+         .add_int('--endpoint_volume_size', 'Size of root volume in GB.',
+                  default=30, group='endpoint')
+         .add_str('--product', 'Product name.', default='dlab',
+                  group='endpoint')
+         .add_str('--additional_tag', 'Additional tag.',
+                  default='product:dlab', group='endpoint')
+         .add_str('--ldap_host', 'ldap host', required=True,
+                  group='endpoint')
+         .add_str('--ldap_dn', 'ldap dn', required=True,
+                  group='endpoint')
+         .add_str('--ldap_user', 'ldap user', required=True,
+                  group='endpoint')
+         .add_str('--ldap_bind_creds', 'ldap bind creds', required=True,
+                  group='endpoint')
+         .add_str('--ldap_users_group', 'ldap users group', required=True,
+                  group='endpoint')
+         .add_bool('--billing_enable', 'Billing enable', group='endpoint', default=False)
+         .add_str('--mongo_password', 'Mongo database password', group='endpoint')
+         .add_str('--mongo_host', 'Mongo database host', group='endpoint', default='localhost')
+         .add_str('--billing_bucket', 'Billing bucket name', group='endpoint', default='')
+         .add_str('--report_path', 'The path to report folder', group='endpoint', default='')
+         .add_str('--aws_job_enabled', 'Billing format. Available options: true (aws), false (epam)', group='endpoint',
+                  default='false')
+         .add_str('--billing_aws_account_id', 'The ID of AWS linked account', group='endpoint', default='')
+         .add_str('--billing_tag', 'Billing tag', group='endpoint', default='dlab')
+         )
+        return params.build()
+
+    def deploy(self):
+        self.fill_sys_argv_from_file()
+        new_dir = os.path.abspath(
+            os.path.join(os.getcwd(), '../../../bin/deploy'))
+        os.chdir(new_dir)
+        start_deploy()
+
+
+class GCPK8sSourceBuilder(AbstractDeployBuilder):
+
+    @property
+    def name(self):
+        return 'k8s'
+
+    @property
+    def use_tf_output_file(self):
+        return True
+
+    @property
+    def terraform_location(self):
+        tf_dir = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
+        return os.path.join(tf_dir, 'gcp/ssn-gke/main')
+
+    @property
+    def terraform_args_group_name(self):
+        return 'k8s'
+
+    def validate_params(self):
+        super(GCPK8sSourceBuilder, self).validate_params()
+
+    @property
+    def cli_args(self):
+        params = ParamsBuilder()
+        (params
+         .add_bool('--no_color', 'no color console_output', group='service',
+                   default=False)
+         .add_str('--state', 'State file path', group='service')
+         .add_str('--namespace', 'Name of namespace', group='k8s')
+         .add_str('--credentials_file_path', 'Path to creds file', group='k8s', required=True)
+         .add_str('--project_id', 'Project ID', group='k8s', required=True)
+         .add_str('--region', 'Region name', group='k8s', required=True)
+         .add_str('--zone', 'Zone name', group='k8s', required=True)
+         .add_str('--vpc_name', 'VPC name', group='k8s')
+         .add_str('--subnet_name', 'Subnet name', group='k8s')
+         .add_str('--service_base_name', 'Service base name', group='k8s', required=True)
+         .add_str('--subnet_cidr', 'Subnet CIDR', group='k8s')
+         .add_str('--additional_tag', 'Additional tag', group='k8s')
+         .add_str('--ssn_k8s_workers_count', 'Number of workers per zone', group='k8s')
+         .add_str('--gke_cluster_version', 'GKE version', group='k8s')
+         .add_str('--ssn_k8s_workers_shape', 'Workers shape', group='k8s')
+         .add_str('--service_account_iam_roles', 'Array of roles', group='k8s')
+         .add_str('--ssn_k8s_alb_dns_name', 'DNS name', group='k8s')
+         .add_str('--keycloak_user', 'Keycloak user name', group='k8s')
+         .add_str('--mysql_user', 'MySQL user name', group='k8s')
+         .add_str('--mysql_db_name', 'MySQL database name', group='k8s')
+         .add_str('--ldap_usernameAttr', 'LDAP username attr', group='k8s', default='uid')
+         .add_str('--ldap_rdnAttr', 'LDAP rdn attr', group='k8s', default='uid')
+         .add_str('--ldap_uuidAttr', 'LDAP uuid attr', group='k8s', default='uid')
+         .add_str('--ldap_users_group', 'LDAP users group', group='k8s', default='ou=People')
+         .add_str('--ldap_dn', 'LDAP DN', group='k8s', default='dc=example,dc=com')
+         .add_str('--ldap_user', 'LDAP user', group='k8s', default='cn=admin')
+         .add_str('--ldap_bind_creds', 'LDAP user password', group='k8s', required=True)
+         .add_str('--ldap_host', 'LDAP host', group='k8s', required=True)
+         .add_str('--mongo_db_username', 'Mongo user name', group='k8s')
+         .add_str('--mongo_dbname', 'Mongo database name', group='k8s')
+         .add_str('--mongo_image_tag', 'Mongo image tag', group='k8s')
+         .add_str('--mongo_service_port', 'Mongo service port', group='k8s')
+         .add_str('--mongo_node_port', 'Mongo node port', group='k8s')
+         .add_str('--mongo_service_name', 'Mongo service name', group='k8s')
+         .add_str('--env_os', 'Environment Operating system', group='k8s', default='debian')
+         .add_str('--big_query_dataset', 'Big query dataset name for billing', group='k8s', default='test')
+         .add_str('--custom_certs_enabled', 'If custom certs enabled', group='k8s')
+         .add_str('--custom_cert_path', 'Custom cert path', group='k8s')
+         .add_str('--custom_key_path', 'Custom key path', group='k8s')
+         .add_str('--custom_certs_host', 'Custom cert host', group='k8s')
+         .add_str('--mysql_disk_size', 'MySQL disk size', group='k8s')
+         .add_str('--domain', 'Domain name', group='k8s', required=True)
+         )
+        return params.build()
+
+    def apply(self):
+        terraform = TerraformProvider(self.no_color)
+        gke_params = self.tf_params.copy()
+        helm_charts_params = self.tf_params.copy()
+
+        gke_params['-target'] = 'module.gke_cluster'
+        helm_charts_params['-target'] = 'module.helm_charts'
+
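+        # Two-pass apply: the GKE cluster module must exist before the helm
+        # charts module can install releases into it.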
+        terraform.apply(gke_params, self.terraform_args)
+        terraform.apply(helm_charts_params, self.terraform_args)
+
+    def deploy(self):
+        pass
+
+    def destroy(self):
+        terraform = TerraformProvider(self.no_color)
+        gke_params = self.tf_params.copy()
+        helm_charts_params = self.tf_params.copy()
+
+        gke_params['-target'] = 'module.gke_cluster'
+        helm_charts_params['-target'] = 'module.helm_charts'
+
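+        # Destroy in reverse order: helm charts first (keeping the state
+        # file), then, after a pause that presumably lets chart-created cloud
+        # resources be released, the GKE cluster itself.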
+        terraform.destroy(helm_charts_params, self.terraform_args, True)
+        time.sleep(60)
+        terraform.destroy(gke_params, self.terraform_args)
+
+
+class GCPEndpointBuilder(AbstractDeployBuilder):
+
+    def update_extracted_file_data(self, obj):
+        if 'ssn_vpc_id' in obj:
+            obj['vpc_id'] = obj['ssn_vpc_id']
+
+    @property
+    def name(self):
+        return 'endpoint'
+
+    @property
+    def use_tf_output_file(self):
+        return True
+
+    @property
+    def terraform_location(self):
+        tf_dir = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
+        return os.path.join(tf_dir, 'gcp/endpoint/main')
+
+    @property
+    def terraform_args_group_name(self):
+        return 'endpoint'
+
+    def validate_params(self):
+        super(GCPEndpointBuilder, self).validate_params()
+        params = self.parse_args()[self.terraform_args_group_name]
+        if len(params.get('endpoint_id')) > 12:
+            sys.stderr.write('endpoint_id length should not exceed 12')
+            sys.exit(1)
+
+    @property
+    def cli_args(self):
+        params = ParamsBuilder()
+        (params
+         .add_bool('--no_color', 'no color console_output', group='service',
+                   default=False)
+         .add_str('--state', 'State file path', group='service')
+         .add_str('--gcp_project_id', 'GCP project ID', required=True, group='endpoint')
+         .add_str('--creds_file', 'Path to creds file', required=True, group='endpoint')
+         .add_str('--pkey', 'path to key', required=True, group='service')
+         .add_str('--service_base_name', 'Service base name', group='endpoint')
+         .add_str('--vpc_id', 'ID of VPC if you already have VPC created.', group='endpoint')
+         .add_str('--subnet_cidr', 'CIDR for Subnet creation. Conflicts with subnet_id.', default='172.31.0.0/24',
+                  group='endpoint')
+         .add_str('--ssn_subnet', 'ID of SSN subnet if you already have subnet created.', group='endpoint')
+         .add_str('--subnet_id', 'ID of subnet', group='endpoint')
+         .add_str('--ami', 'ID of VM image.', group='endpoint')
+         .add_str('--path_to_pub_key', 'Path to public key', required=True, group='endpoint')
+         .add_str('--endpoint_id', 'Endpoint id.', required=True, group='endpoint')
+         .add_str('--region', 'Name of region.', group='endpoint')
+         .add_str('--zone', 'Name of zone.', group='endpoint')
+         .add_str('--endpoint_shape', 'Instance shape of Endpoint.', group='endpoint')
+         .add_str('--endpoint_volume_size', 'Endpoint disk size', group='endpoint')
+         .add_str('--additional_tag', 'Additional tag.', default='product:dlab', group='endpoint')
+         .add_str('--ldap_host', 'ldap host', required=True, group='endpoint')
+         .add_str('--ldap_dn', 'ldap dn', required=True, group='endpoint')
+         .add_str('--ldap_user', 'ldap user', required=True, group='endpoint')
+         .add_str('--ldap_bind_creds', 'ldap bind creds', required=True, group='endpoint')
+         .add_str('--ldap_users_group', 'ldap users group', required=True, group='endpoint')
+         .add_str('--firewall_ing_cidr_range', 'Ingress range', group='endpoint')
+         .add_str('--firewall_eg_cidr_range', 'Egress range', group='endpoint')
+         .add_str('--endpoint_policies', 'Endpoint policies list', group='endpoint')
+         .add_str('--endpoint_roles', 'Endpoint roles list', group='endpoint')
+         .add_str('--bucket_region', 'Bucket region', group='endpoint')
+         .add_bool('--billing_enable', 'Billing enable', group='endpoint', default=False)
+         .add_str('--billing_dataset_name', 'Billing dataset name', group='endpoint')
+         .add_str('--mongo_password', 'Mongo database password', group='endpoint')
+         .add_str('--mongo_host', 'Mongo database host', group='endpoint', default='localhost')
+         )
+        return params.build()
+
+    def deploy(self):
+        self.fill_sys_argv_from_file()
+        new_dir = os.path.abspath(
+            os.path.join(os.getcwd(), '../../../bin/deploy'))
+        os.chdir(new_dir)
+        start_deploy()
+
+
+class AzureEndpointBuilder(AbstractDeployBuilder):
+
+    def update_extracted_file_data(self, obj):
+        if 'ssn_vpc_id' in obj:
+            obj['vpc_id'] = obj['ssn_vpc_id']
+
+    @property
+    def name(self):
+        return 'endpoint'
+
+    @property
+    def use_tf_output_file(self):
+        return True
+
+    @property
+    def terraform_location(self):
+        tf_dir = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
+        return os.path.join(tf_dir, 'azure/endpoint/main')
+
+    @property
+    def terraform_args_group_name(self):
+        return 'endpoint'
+
+    def validate_params(self):
+        super(AzureEndpointBuilder, self).validate_params()
+        params = self.parse_args()[self.terraform_args_group_name]
+        if len(params.get('endpoint_id')) > 12:
+            sys.stderr.write('endpoint_id length should not exceed 12')
+            sys.exit(1)
+
+    @property
+    def cli_args(self):
+        params = ParamsBuilder()
+        (params
+         .add_bool('--no_color', 'no color console_output', group='service',
+                   default=False)
+         .add_str('--state', 'State file path', group='service')
+         .add_str('--auth_file_path', 'Path to creds file', required=True, group='endpoint')
+         .add_str('--pkey', 'path to key', required=True, group='service')
+         .add_str('--service_base_name', 'Service base name', group='endpoint')
+         .add_str('--resource_group_name', 'Resource group name', group='endpoint')
+         .add_str('--vpc_id', 'ID of VPC if you already have VPC created.', group='endpoint')
+         .add_str('--vpc_cidr', 'CIDR for VPC creation. Conflicts with vpc_id.', default='172.31.0.0/16',
+                  group='endpoint')
+         .add_str('--subnet_cidr', 'CIDR for Subnet creation. Conflicts with subnet_id.', default='172.31.0.0/24',
+                  group='endpoint')
+         .add_str('--ssn_subnet', 'ID of SSN subnet if you already have subnet created.', group='endpoint')
+         .add_str('--subnet_id', 'ID of subnet', group='endpoint')
+         .add_str('--ami', 'ID of VM image.', group='endpoint')
+         .add_str('--key_path', 'Path to public key', required=True, group='endpoint')
+         .add_str('--endpoint_id', 'Endpoint id.', required=True, group='endpoint')
+         .add_str('--region', 'Name of region.', group='endpoint')
+         .add_str('--endpoint_shape', 'Instance shape of Endpoint.', default='Standard_DS2_v2', group='endpoint')
+         .add_str('--endpoint_volume_size', 'Endpoint disk size', default='30', group='endpoint')
+         .add_str('--additional_tag', 'Additional tag.', default='product:dlab', group='endpoint')
+         .add_str('--tenant_id', 'Azure tenant ID', group='endpoint', default='')
+         .add_str('--subscription_id', 'Azure subscription ID', group='endpoint', default='')
+         .add_str('--offer_number', 'Azure offer number', group='endpoint', default='')
+         .add_str('--currency', 'Azure currency for billing', group='endpoint', default='')
+         .add_str('--locale', 'Azure locale', group='endpoint', default='')
+         .add_str('--region_info', 'Azure region info', group='endpoint', default='')
+         .add_str('--mongo_password', 'Mongo database password', group='endpoint')
+         .add_str('--mongo_host', 'Mongo database host', group='endpoint', default='localhost')
+         .add_bool('--billing_enable', 'Billing enable', group='endpoint', default=False)
+         )
+        return params.build()
+
+    def deploy(self):
+        self.fill_sys_argv_from_file()
+        new_dir = os.path.abspath(
+            os.path.join(os.getcwd(), '../../../bin/deploy'))
+        os.chdir(new_dir)
+        start_deploy()
+
+
+class DeployDirector:
+
+    def build(self, action, builder):
+        """ Do build action
+        Args:
+            builder: AbstractDeployBuilder
+        Returns:
+            None
+        """
+        try:
+            builder.provision()
+            if action == 'deploy':
+                builder.apply()
+                builder.store_output_to_file()
+                builder.deploy()
+            if action == 'destroy':
+                builder.destroy()
+
+        except Exception as ex:
+            print(ex)
+
+
+def deploy():
+    actions = {'deploy', 'destroy'}
+
+    sources_targets = {
+        'aws': ['k8s', 'endpoint'],
+        'gcp': ['k8s', 'endpoint'],
+        'azure': ['endpoint']
+    }
+
+    no_args_error = ('usage: ./dlab {} {} {}\n'.format(
+        actions,
+        set(sources_targets.keys()),
+        set(itertools.chain(*sources_targets.values()))))
+    no_source_error = (
+        lambda x: ('usage: ./dlab {} {} {}\n'.format(
+            x,
+            set(sources_targets.keys()),
+            set(itertools.chain(*sources_targets.values())))))
+    no_target_error = (
+        lambda x, y: ('usage: ./dlab {} {} {}\n'.format(
+            x, y, set(itertools.chain(*sources_targets.values())))))
+
+    if len(sys.argv) == 1 or sys.argv[1] not in actions:
+        sys.stderr.write(no_args_error)
+        exit(1)
+    if len(sys.argv) == 2 or sys.argv[2] not in sources_targets:
+        sys.stderr.write(no_source_error(sys.argv[1]))
+        exit(1)
+    if len(sys.argv) == 3 or sys.argv[3] not in sources_targets[sys.argv[2]]:
+        sys.stderr.write(no_target_error(sys.argv[1], sys.argv[2]))
+        exit(1)
+
+    module, action, source, target = sys.argv[:4]
+    builders_dict = {
+        'aws': {
+            'k8s': AWSK8sSourceBuilder,
+            'endpoint': AWSEndpointBuilder
+        },
+        'gcp': {
+            'k8s': GCPK8sSourceBuilder,
+            'endpoint': GCPEndpointBuilder
+        },
+        'azure': {
+            'endpoint': AzureEndpointBuilder
+        }
+    }
+    builder = builders_dict[source][target]()
+    deploy_director = DeployDirector()
+    deploy_director.build(action, builder)
+
+
+if __name__ == '__main__':
+    deploy()
diff --git a/infrastructure-provisioning/terraform/bin/terraform-cli.py b/infrastructure-provisioning/terraform/bin/terraform-cli.py
new file mode 100755
index 0000000..f8d593b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/terraform-cli.py
@@ -0,0 +1,680 @@
+#!/usr/bin/env python
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import abc
+import argparse
+import itertools
+import json
+import logging
+import os
+import os.path
+import sys
+import time
+from deploy.endpoint_fab import start_deploy
+from fabric import Connection
+from patchwork.transfers import rsync
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+logging.basicConfig(level=logging.INFO,
+                    format='%(levelname)s-%(message)s')
+
+
+def get_args_string(cli_args):
+    """Convert dict of cli argument into string
+
+    Args:
+        cli_args: dict of cli arguments
+    Returns:
+        str: string of joined key=values
+    """
+    args = []
+    for key, value in cli_args.items():
+        if not value:
+            continue
+        if isinstance(value, list):
+            quoted_list = ['"{}"'.format(item) for item in value]
+            joined_values = ', '.join(quoted_list)
+            value = '[{}]'.format(joined_values)
+        args.append("-var '{0}={1}'".format(key, value))
+    return ' '.join(args)
+
+
+class TerraformProviderError(Exception):
+    """
+    Raised on errors during Terraform provisioning
+    """
+    pass
+
+
+class Console:
+    @staticmethod
+    def execute(command):
+        """ Execute cli command
+
+        Args:
+            command: str cli command
+        Returns:
+            str: command result
+        """
+        return os.popen(command).read()
+
+    @staticmethod
+    def ssh(ip, name, pkey):
+        return Connection(host=ip,
+                          user=name,
+                          connect_kwargs={'key_filename': pkey,
+                                          'allow_agent': False,
+                                          'look_for_keys': False,
+                                          })
+
+
+class TerraformProvider:
+    def initialize(self):
+        """Initialize terraform
+
+        Returns:
+             bool: init successful
+        Raises:
+            TerraformProviderError: if initialization did not succeed
+        """
+        logging.info('terraform init')
+        terraform_success_init = 'Terraform has been successfully initialized!'
+        terraform_init_result = Console.execute('terraform init')
+        logging.info(terraform_init_result)
+        if terraform_success_init not in terraform_init_result:
+            raise TerraformProviderError(terraform_init_result)
+
+    def validate(self):
+        """Validate terraform
+
+        Returns:
+             bool: validation successful
+        Raises:
+            TerraformProviderError: if validation did not succeed
+
+        """
+        logging.info('terraform validate')
+        terraform_success_validate = 'Success!'
+        terraform_validate_result = Console.execute('terraform validate')
+        logging.info(terraform_validate_result)
+        if terraform_success_validate not in terraform_validate_result:
+            raise TerraformProviderError(terraform_validate_result)
+
+    def apply(self, cli_args):
+        """Run terraform
+
+        Args:
+            cli_args: dict of parameters
+        Returns:
+             None
+        """
+        logging.info('terraform apply')
+        args_str = get_args_string(cli_args)
+        command = 'terraform apply -auto-approve {}'
+        result = Console.execute(command.format(args_str))
+        logging.info(result)
+
+    def destroy(self, cli_args):
+        """Destroy terraform
+
+        Args:
+            cli_args: dict of parameters
+        Returns:
+             None
+        """
+        logging.info('terraform destroy')
+        args_str = get_args_string(cli_args)
+        command = 'terraform destroy -auto-approve {}'
+        Console.execute(command.format(args_str))
+
+    def output(self, *args):
+        """Get terraform output
+
+        Args:
+            *args: list of str parameters
+        Returns:
+            str: terraform output result
+        """
+        return Console.execute('terraform output {}'.format(' '.join(args)))
+
+
+class AbstractDeployBuilder:
+
+    @property
+    @abc.abstractmethod
+    def terraform_location(self):
+        """ get Terraform location
+
+        Returns:
+            str: TF script location
+        """
+        raise NotImplementedError
+
+    @property
+    @abc.abstractmethod
+    def terraform_args_group_name(self):
+        """ get Terraform location
+
+        Returns:
+            str: TF script location
+        """
+        raise NotImplementedError
+
+    @property
+    @abc.abstractmethod
+    def cli_args(self):
+        """Get cli arguments
+
+        Returns:
+            dict: dictionary of CLI arguments
+                  with name as key and props as value
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def deploy(self):
+        """Post terraform execution
+
+        Returns:
+            None
+        """
+        raise NotImplementedError
+
+    def parse_args(self):
+        """Get dict of arguments
+
+        Returns:
+            dict: CLI arguments
+        """
+        parsers = {}
+        args = []
+
+        for arg in self.cli_args:
+            group = arg.get('group')
+            if isinstance(group, (list, tuple)):
+                for item in group:
+                    args.append(dict(arg.copy(), **{'group': item}))
+            else:
+                args.append(arg)
+
+        cli_args = sorted(args, key=lambda x: x.get('group'))
+        args_groups = itertools.groupby(cli_args, lambda x: x.get('group'))
+        for group, group_args in args_groups:
+            parser = argparse.ArgumentParser()
+            for arg in group_args:
+                parser.add_argument(arg.get('name'), **arg.get('props'))
+            parsers[group] = parser
+        return {
+            group: vars(parser.parse_known_args()[0])
+            for group, parser in parsers.items()
+        }
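+
+    # For illustration (a sketch): with CLI flags
+    # "--action deploy --pkey /path/key.pem", parse_args() returns one dict
+    # of parsed values per argument group, e.g.
+    #     {'service': {'action': 'deploy', 'pkey': '/path/key.pem'},
+    #      'k8s': {...}, 'helm_charts': {...}}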
+
+    def provision(self):
+        """Execute terraform script
+
+        Returns:
+            None
+        Raises:
+            TerraformProviderError: if init or validate fails
+        """
+        tf_location = self.terraform_location
+        cli_args = self.parse_args()
+        action = cli_args.get('service').get('action')
+        terraform_args = cli_args.get(self.terraform_args_group_name)
+        terraform = TerraformProvider()
+
+        os.chdir(tf_location)
+        try:
+            terraform.initialize()
+            terraform.validate()
+            if action == 'deploy':
+                terraform.apply(terraform_args)
+            elif action == 'destroy':
+                terraform.destroy(terraform_args)
+        except TerraformProviderError as ex:
+            raise Exception('Error while provisioning: {}'.format(ex))
+
+    def get_node_ip(self, output):
+        """Extract ip
+
+        Args:
+            output: str of terraform output
+        Returns:
+            str: extracted ip
+
+        """
+
+        ips = json.loads(output)
+        if not ips:
+            raise TerraformProviderError('No IP addresses found in terraform output')
+        return ips[0]
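+
+    # Example (illustrative): for terraform output '["172.31.0.10", "172.31.0.11"]'
+    # get_node_ip returns '172.31.0.10'.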
+
+
+class DeployDirector:
+
+    def build(self, *builders):
+        """ Do build action
+
+        Args:
+            *builders: AbstractDeployBuilder instances
+        Returns:
+            None
+        """
+        try:
+            for builder in builders:
+                builder.provision()
+                builder.deploy()
+        except Exception as ex:
+            logging.error(ex)
+
+    def get_status(self):
+        """ Get execution status
+
+        Returns:
+            int: Execution error status (0 if success)
+        """
+
+        return 0
+
+
+class ParamsBuilder:
+
+    def __init__(self):
+        self.__params = []
+
+    def add(self, arg_type, name, desc, **kwargs):
+        parameter = {
+            'group': kwargs.get('group'),
+            'name': name,
+            'props': {
+                'help': desc,
+                'type': arg_type,
+                'default': kwargs.get('default'),
+                'choices': kwargs.get('choices'),
+                'nargs': kwargs.get('nargs'),
+                'action': kwargs.get('action'),
+                'required': kwargs.get('required'),
+            }
+        }
+        self.__params.append(parameter)
+        return self
+
+    def add_str(self, name, desc, **kwargs):
+        return self.add(str, name, desc, **kwargs)
+
+    def add_int(self, name, desc, **kwargs):
+        return self.add(int, name, desc, **kwargs)
+
+    def build(self):
+        return self.__params
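+
+# Illustrative ParamsBuilder usage (a sketch; names and values are
+# placeholders):
+#
+#     params = ParamsBuilder()
+#     params.add_str('--region', 'AWS region', default='us-west-2', group='k8s')
+#     params.build()
+#     # -> [{'group': 'k8s', 'name': '--region',
+#     #      'props': {'help': 'AWS region', 'type': str,
+#     #                'default': 'us-west-2', ...}}]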
+
+
+class AWSK8sSourceBuilder(AbstractDeployBuilder):
+
+    def __init__(self):
+        super(AWSK8sSourceBuilder, self).__init__()
+        self._args = self.parse_args()
+        self._ip = None
+        self._user_name = self.args.get(self.terraform_args_group_name).get(
+            'os_user')
+        self._pkey_path = self.args.get('service').get('pkey')
+
+    @property
+    def args(self):
+        return self._args
+
+    @property
+    def ip(self):
+        return self._ip
+
+    @ip.setter
+    def ip(self, ip):
+        self._ip = ip
+
+    @property
+    def user_name(self):
+        return self._user_name
+
+    @property
+    def pkey_path(self):
+        return self._pkey_path
+
+    @property
+    def terraform_location(self):
+        tf_dir = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
+        return os.path.join(tf_dir, 'aws/ssn-k8s/main')
+
+    @property
+    def terraform_args_group_name(self):
+        return 'k8s'
+
+    @property
+    def cli_args(self):
+        params = ParamsBuilder()
+        (params
+         .add_str('--action', 'Action', default='deploy',
+                  group='service')
+         .add_str('--access_key_id', 'AWS Access Key ID', required=True,
+                  group='k8s')
+         .add_str('--allowed_cidrs',
+                  'CIDR to allow access to SSN K8S cluster.',
+                  default=["0.0.0.0/0"], action='append', group='k8s')
+         .add_str('--ami', 'ID of EC2 AMI.', required=True, group='k8s')
+         .add_str('--env_os', 'OS type.', default='debian',
+                  choices=['debian', 'redhat'], group='k8s')
+         .add_str('--key_name', 'Name of EC2 Key pair.', required=True,
+                  group='k8s')
+         .add_str('--os_user', 'Name of DLab service user.',
+                  default='dlab-user', group='k8s')
+         .add_str('--pkey', 'path to key', required=True, group='service')
+         .add_str('--region', 'Name of AWS region.', default='us-west-2',
+                  group='k8s')
+         .add_str('--secret_access_key', 'AWS Secret Access Key', required=True,
+                  group='k8s')
+         .add_str('--service_base_name',
+                  'Any infrastructure value (should be unique if '
+                  'multiple SSNs have been deployed before).',
+                  default='dlab-k8s', group='k8s')
+         .add_int('--ssn_k8s_masters_count', 'Count of K8S masters.', default=3,
+                  group='k8s')
+         .add_int('--ssn_k8s_workers_count', 'Count of K8S workers', default=2,
+                  group=('k8s', 'helm_charts'))
+         .add_str('--ssn_k8s_masters_shape', 'Shape for SSN K8S masters.',
+                  default='t2.medium', group='k8s')
+         .add_str('--ssn_k8s_workers_shape', 'Shape for SSN K8S workers.',
+                  default='t2.medium', group='k8s')
+         .add_int('--ssn_root_volume_size', 'Size of root volume in GB.',
+                  default=30, group='k8s')
+         .add_str('--subnet_cidr_a',
+                  'CIDR for Subnet creation in zone a. Conflicts with subnet_id_a.',
+                  default='172.31.0.0/24', group='k8s')
+         .add_str('--subnet_cidr_b',
+                  'CIDR for Subnet creation in zone b. Conflicts with subnet_id_b.',
+                  default='172.31.1.0/24', group='k8s')
+         .add_str('--subnet_cidr_c',
+                  'CIDR for Subnet creation in zone c. Conflicts with subnet_id_c.',
+                  default='172.31.2.0/24', group='k8s')
+         .add_str('--subnet_id_a',
+                  'ID of AWS Subnet in zone a if you already have subnet created.',
+                  group='k8s')
+         .add_str('--subnet_id_b',
+                  'ID of AWS Subnet in zone b if you already have subnet created.',
+                  group='k8s')
+         .add_str('--subnet_id_c',
+                  'ID of AWS Subnet in zone c if you already have subnet created.',
+                  group='k8s')
+         .add_str('--vpc_cidr', 'CIDR for VPC creation. Conflicts with vpc_id',
+                  default='172.31.0.0/16', group='k8s')
+         .add_str('--vpc_id', 'ID of AWS VPC if you already have VPC created.',
+                  group='k8s')
+         .add_str('--zone', 'Name of AWS zone', default='a',
+                  group='k8s')
+         .add_str('--ssn_keystore_password', 'ssn_keystore_password',
+                  group='helm_charts')
+         .add_str('--endpoint_keystore_password', 'endpoint_keystore_password',
+                  group='helm_charts')
+         .add_str('--ssn_bucket_name', 'ssn_bucket_name',
+                  group='helm_charts')
+         .add_str('--endpoint_eip_address', 'endpoint_eip_address',
+                  group='helm_charts')
+         .add_str('--ldap_host', 'ldap host', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_dn', 'ldap dn', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_user', 'ldap user', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_bind_creds', 'ldap bind creds', required=True,
+                  group='helm_charts')
+         .add_str('--ldap_users_group', 'ldap users group', required=True,
+                  group='helm_charts')
+         )
+        return params.build()
+
+    def check_k8s_cluster_status(self):
+        """ Check for kubernetes status
+
+        Returns:
+            None
+        Raises:
+            TerraformProviderError: if master or kubeDNS is not running
+
+        """
+        start_time = time.time()
+        Console.execute('ssh-keyscan {} >> ~/.ssh/known_hosts'.format(self.ip))
+        while True:
+            with Console.ssh(self.ip, self.user_name, self.pkey_path) as c:
+                k8c_info_status = c.run(
+                    'kubectl cluster-info | '
+                    'sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]//g"') \
+                    .stdout
+
+            kubernetes_success_status = 'Kubernetes master is running'
+            kubernetes_dns_success_status = 'KubeDNS is running'
+
+            kubernetes_succeed = kubernetes_success_status in k8c_info_status
+            kube_dns_succeed = kubernetes_dns_success_status in k8c_info_status
+
+            if kubernetes_succeed and kube_dns_succeed:
+                break
+            if (time.time() - start_time) >= 600:
+                raise TimeoutError
+            time.sleep(60)
+
+    def check_tiller_status(self):
+        """ Check tiller status
+
+        Returns:
+            None
+        Raises:
+            TimeoutError: if tiller is not running within 20 minutes
+
+        """
+        start_time = time.time()
+
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as c:
+            while True:
+                tiller_status = c.run(
+                    "kubectl get pods --all-namespaces | grep tiller | awk '{print $4}'") \
+                    .stdout
+
+                tiller_success_status = 'Running'
+
+                if tiller_success_status in tiller_status:
+                    break
+                if (time.time() - start_time) >= 1200:
+                    raise TimeoutError
+                time.sleep(60)
+
+    def select_master_ip(self):
+        terraform = TerraformProvider()
+        output = terraform.output('-json ssn_k8s_masters_ip_addresses')
+        self.ip = self.get_node_ip(output)
+
+    def copy_terraform_to_remote(self):
+        logging.info('transfer terraform dir to remote')
+        tf_dir = os.path.abspath(
+            os.path.join(os.getcwd(), os.path.pardir, os.path.pardir))
+        source = os.path.join(tf_dir, 'ssn-helm-charts')
+        remote_dir = '/home/{}/terraform/'.format(self.user_name)
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as conn:
+            conn.run('mkdir -p {}'.format(remote_dir))
+            rsync(conn, source, remote_dir)
+
+    def run_remote_terraform(self):
+        args = self.parse_args()
+        dns_name = json.loads(TerraformProvider()
+                              .output('-json ssn_k8s_alb_dns_name'))
+        logging.info('apply ssn-helm-charts')
+        terraform_args = args.get('helm_charts')
+        args_str = get_args_string(terraform_args)
+        with Console.ssh(self.ip, self.user_name, self.pkey_path) as conn:
+            with conn.cd('terraform/ssn-helm-charts/main'):
+                conn.run('terraform init')
+                conn.run('terraform validate')
+                conn.run('terraform apply -auto-approve {} '
+                         '-var \'ssn_k8s_alb_dns_name={}\''
+                         .format(args_str, dns_name))
+                output = ' '.join(conn.run('terraform output -json')
+                                  .stdout.split())
+                self.fill_args_from_dict(json.loads(output))
+
+    def output_terraform_result(self):
+        dns_name = json.loads(
+            TerraformProvider().output('-json ssn_k8s_alb_dns_name'))
+        ssn_bucket_name = json.loads(
+            TerraformProvider().output('-json ssn_bucket_name'))
+        ssn_k8s_sg_id = json.loads(
+            TerraformProvider().output('-json ssn_k8s_sg_id'))
+        ssn_subnets = json.loads(
+            TerraformProvider().output('-json ssn_subnets'))
+        ssn_vpc_id = json.loads(TerraformProvider().output('-json ssn_vpc_id'))
+
+        logging.info("""
+        DLab SSN K8S cluster has been deployed successfully!
+        Summary:
+        DNS name: {}
+        Bucket name: {}
+        VPC ID: {}
+        Subnet IDs:  {}
+        SG IDs: {}
+        DLab UI URL: http://{}
+        """.format(dns_name, ssn_bucket_name, ssn_vpc_id,
+                   ', '.join(ssn_subnets), ssn_k8s_sg_id, dns_name))
+
+    def fill_args_from_dict(self, output):
+        for key, value in output.items():
+            sys.argv.extend(['--'+key, value.get('value')])
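+
+    # For illustration: given terraform output such as
+    #     {"ssn_bucket_name": {"value": "my-bucket"}}
+    # this extends sys.argv with "--ssn_bucket_name my-bucket", so the
+    # helm_charts group picks the value up on the next parse_args() call.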
+
+    def deploy(self):
+        if self.args.get('service').get('action') == 'destroy':
+            return
+        logging.info('deploy')
+        self.select_master_ip()
+        self.check_k8s_cluster_status()
+        self.check_tiller_status()
+        output = ' '.join(TerraformProvider().output('-json').split())
+        self.fill_args_from_dict(json.loads(output))
+        self.copy_terraform_to_remote()
+        self.run_remote_terraform()
+        self.output_terraform_result()
+
+
+class AWSEndpointBuilder(AbstractDeployBuilder):
+    @property
+    def terraform_location(self):
+        tf_dir = os.path.abspath(os.path.join(os.getcwd(),
+                                              os.path.pardir, os.path.pardir))
+        return os.path.join(tf_dir, 'endpoint/main')
+
+    @property
+    def terraform_args_group_name(self):
+        return 'endpoint'
+
+    @property
+    def cli_args(self):
+        params = ParamsBuilder()
+        (params
+         .add_str('--action', 'Action', default='deploy',
+                  group='service')
+         .add_str('--pkey', 'path to key', required=True, group='service')
+         .add_str('--service_base_name',
+                  'Any infrastructure value (should be unique if multiple '
+                  'SSNs have been deployed before). Should be the same as on the SSN.',
+                  group='endpoint')
+         .add_str('--vpc_id', 'ID of AWS VPC if you already have VPC created.',
+                  group='endpoint')
+         .add_str('--vpc_cidr', 'CIDR for VPC creation. Conflicts with vpc_id.',
+                  default='172.31.0.0/16', group='endpoint')
+         .add_str('--subnet_id',
+                  'ID of AWS Subnet if you already have subnet created.',
+                  group='endpoint')
+         .add_str('--subnet_cidr',
+                  'CIDR for Subnet creation. Conflicts with subnet_id.',
+                  default='172.31.0.0/24', group='endpoint')
+         .add_str('--ami', 'ID of EC2 AMI.', required=True, group='endpoint')
+         .add_str('--key_name', 'Name of EC2 Key pair.', required=True,
+                  group='endpoint')
+         .add_str('--endpoint_id', 'Endpoint id.', required=True,
+                  group='endpoint')
+         .add_str('--region', 'Name of AWS region.', default='us-west-2',
+                  group='endpoint')
+         .add_str('--zone', 'Name of AWS zone.', default='a', group='endpoint')
+         .add_str('--network_type',
+                  'Type of the network to create for the endpoint (if the '
+                  'network does not already exist and must be created)',
+                  default='public', group='endpoint')
+         .add_str('--endpoint_instance_shape', 'Instance shape of Endpoint.',
+                  default='t2.medium', group='endpoint')
+         .add_int('--endpoint_volume_size', 'Size of root volume in GB.',
+                  default=30, group='endpoint')
+         )
+        return params.build()
+
+    def deploy(self):
+        start_deploy()
+
+
+def main():
+    sources_targets = {'aws': ['k8s', 'endpoint']}
+
+    no_args_error = ('usage: ./terraform-cli {} {}'
+                     .format(set(sources_targets.keys()),
+                             set(itertools.chain(*sources_targets.values()))))
+
+    def no_target_error(source):
+        return ('usage: ./terraform-cli {} {}'
+                .format(source, set(itertools.chain(
+                    *sources_targets.values()))))
+
+    if any([len(sys.argv) == 1,
+            len(sys.argv) > 2 and sys.argv[1] not in sources_targets]):
+        print(no_args_error)
+        sys.exit(1)
+
+    if any([len(sys.argv) == 2,
+            sys.argv[1] not in sources_targets,
+            len(sys.argv) > 2 and sys.argv[2] not in sources_targets[
+                sys.argv[1]]
+            ]):
+        print(no_target_error(sys.argv[1]))
+        sys.exit(1)
+
+    source = sys.argv[1]
+    target = sys.argv[2]
+
+    if source == 'aws':
+        if target == 'k8s':
+            builders = (AWSK8sSourceBuilder(),)
+        elif target == 'endpoint':
+            builders = (AWSK8sSourceBuilder(), AWSEndpointBuilder())
+    deploy_director = DeployDirector()
+    deploy_director.build(*builders)
+
+
+if __name__ == "__main__":
+    main()
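+
+# Example invocation (illustrative; all flag values are placeholders):
+#
+#     ./terraform-cli aws k8s --action deploy \
+#         --access_key_id KEY --secret_access_key SECRET \
+#         --ami ami-xxxxxxxx --key_name my-key --pkey /path/to/key.pem \
+#         --ldap_host ldap.example.com --ldap_dn dc=example,dc=com \
+#         --ldap_user cn=admin --ldap_bind_creds SECRET \
+#         --ldap_users_group ou=People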
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf
new file mode 100644
index 0000000..9551d65
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf
@@ -0,0 +1,36 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  shared_bucket_name = "${var.service_base_name}-${var.endpoint_id}-shared-bucket"
+  additional_tag     = split(":", var.additional_tag)
+}
+
+resource "google_storage_bucket" "shared_bucket" {
+  name          = local.shared_bucket_name
+  force_destroy = true
+  labels = {
+    name                           = local.shared_bucket_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.service_base_name}-tag" = local.shared_bucket_name
+    "endpoint_tag"                 = var.endpoint_id
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/iam.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/iam.tf
new file mode 100644
index 0000000..eda7eb5
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/iam.tf
@@ -0,0 +1,49 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_policy_name      = "${var.service_base_name}-${var.endpoint_id}-policy"
+  endpoint_role_name        = "${var.service_base_name}-${var.endpoint_id}-role"
+  service_account_name      = "${var.service_base_name}-${var.endpoint_id}-sa"
+}
+
+resource "google_service_account" "endpoint_sa" {
+  account_id   = local.service_account_name
+  display_name = local.service_account_name
+}
+
+resource "google_project_iam_custom_role" "endpoint_role" {
+  permissions = var.endpoint_policies
+  role_id     = replace(local.endpoint_role_name, "-", "_")
+  title       = local.endpoint_role_name
+}
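+
+# Note: GCP custom role IDs may not contain hyphens, so the hyphenated role
+# name is converted, e.g. "dlab-endpoint1-role" -> "dlab_endpoint1_role"
+# (names here are illustrative).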
+
+resource "google_project_iam_member" "iam" {
+  # try to set perms as file
+  count  = length(var.endpoint_roles)
+  member = "serviceAccount:${google_service_account.endpoint_sa.email}"
+  role   = element(var.endpoint_roles, count.index)
+}
+
+resource "google_project_iam_member" "role_for_member" {
+  member = "serviceAccount:${google_service_account.endpoint_sa.email}"
+  role   = google_project_iam_custom_role.endpoint_role.id
+}
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/instance.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/instance.tf
new file mode 100644
index 0000000..96fa251
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/instance.tf
@@ -0,0 +1,66 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  endpoint_instance_name = "${var.service_base_name}-${var.endpoint_id}-endpoint"
+  endpoint_instance_ip   = "${var.service_base_name}-${var.endpoint_id}-static-ip"
+}
+
+resource "google_compute_instance" "endpoint" {
+  name         = local.endpoint_instance_name
+  machine_type = var.endpoint_shape
+  zone         = var.zone
+  tags         = [replace(local.endpoint_instance_name, "_", "-")]
+  labels       = {
+    name        = local.endpoint_instance_name
+    sbn         = var.service_base_name
+    product     = var.product
+    endpoint_id = var.endpoint_id
+  }
+
+  boot_disk {
+    initialize_params {
+      image = var.ami
+      size  = var.endpoint_volume_size
+    }
+  }
+
+  metadata = {
+    ssh-keys = "ubuntu:${file(var.path_to_pub_key)}"
+  }
+
+  service_account {
+    email  = google_service_account.endpoint_sa.email
+    scopes = ["https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute"]
+  }
+
+  network_interface {
+    network    = data.google_compute_network.endpoint_vpc_data.name
+    subnetwork = data.google_compute_subnetwork.endpoint_subnet_data.name
+    access_config {
+      nat_ip = google_compute_address.static.address
+    }
+  }
+}
+
+resource "google_compute_address" "static" {
+  name = local.endpoint_instance_ip
+}
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/endpoint/main/main.tf
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/endpoint/main/main.tf
index 16da950..9d69110 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/main.tf
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+provider "google" {
+  credentials = file(var.creds_file)
+  project     = var.gcp_project_id
+  region      = var.region
+  zone        = var.zone
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
new file mode 100644
index 0000000..65becf8
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
@@ -0,0 +1,72 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  vpc_id                = "${var.service_base_name}-${var.endpoint_id}-vpc"
+  subnet_name           = "${var.service_base_name}-${var.endpoint_id}-subnet"
+  firewall_ingress_name = "${var.service_base_name}-${var.endpoint_id}-ingress-sg"
+  firewall_egress_name  = "${var.service_base_name}-${var.endpoint_id}-egress-sg"
+}
+
+resource "google_compute_network" "endpoint_vpc" {
+  count = var.vpc_id == "" ? 1 : 0
+  name                    = local.vpc_id
+  auto_create_subnetworks = false
+}
+
+data "google_compute_network" "endpoint_vpc_data" {
+  name = var.vpc_id == "" ? google_compute_network.endpoint_vpc.0.name : var.vpc_id
+}
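+
+# Create-or-reuse pattern: when var.vpc_id is empty a new network is created
+# (count = 1) and the data source reads it back by name; otherwise the
+# existing network named in var.vpc_id is looked up. The subnet below
+# follows the same pattern.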
+
+resource "google_compute_subnetwork" "endpoint_subnet" {
+  count         = var.subnet_id == "" ? 1 : 0
+  name          = local.subnet_name
+  ip_cidr_range = var.subnet_cidr
+  region        = var.region
+  network       = data.google_compute_network.endpoint_vpc_data.id
+}
+
+data "google_compute_subnetwork" "endpoint_subnet_data" {
+  name = var.subnet_id == "" ? google_compute_subnetwork.endpoint_subnet.0.name : var.subnet_id
+}
+
+resource "google_compute_firewall" "firewall-ingress" {
+  name    = local.firewall_ingress_name
+  network = data.google_compute_network.endpoint_vpc_data.name
+  allow {
+    protocol = "tcp"
+    ports    = ["22", "8084", "8085", "4822", "8088"]
+  }
+  target_tags   = ["${var.service_base_name}-${var.endpoint_id}-endpoint"]
+  source_ranges = [var.firewall_ing_cidr_range]
+}
+
+resource "google_compute_firewall" "firewall-egress" {
+  name      = local.firewall_egress_name
+  network   = data.google_compute_network.endpoint_vpc_data.name
+  direction = "EGRESS"
+  allow {
+    protocol = "all"
+  }
+  target_tags        = ["${var.service_base_name}-${var.endpoint_id}-endpoint"]
+  destination_ranges = [var.firewall_eg_cidr_range]
+}
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/endpoint/main/outputs.tf
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/endpoint/main/outputs.tf
index 16da950..b031b2c 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/outputs.tf
@@ -19,19 +19,14 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+output "endpoint_eip_address" {
+  value = google_compute_address.static.address
+}
 
+output "subnet_id" {
+  value = data.google_compute_subnetwork.endpoint_subnet_data.name
+}
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+output "vpc_id" {
+  value = data.google_compute_network.endpoint_vpc_data.name
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/variables.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/variables.tf
new file mode 100644
index 0000000..b1d89ba
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/variables.tf
@@ -0,0 +1,163 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "gcp_project_id" {
+  default = ""
+}
+
+variable "creds_file" {
+  default = ""
+}
+
+variable "endpoint_shape" {
+  default = "n1-standard-2"
+}
+
+variable "region" {
+  default = ""
+}
+
+variable "zone" {
+  default = ""
+}
+
+variable "service_base_name" {
+  default = ""
+}
+
+variable "endpoint_id" {
+  default = ""
+}
+
+variable "vpc_id" {
+  default = ""
+}
+
+variable "ami" {
+  default = "/projects/ubuntu-os-cloud/global/images/ubuntu-1604-xenial-v20190628"
+}
+
+variable "subnet_id" {
+  default = ""
+}
+
+variable "endpoint_volume_size" {
+  default = "20"
+}
+
+variable "subnet_cidr" {
+  default = "172.31.0.0/24"
+}
+
+variable "firewall_ing_cidr_range" {
+  default = "0.0.0.0/0"
+}
+
+variable "firewall_eg_cidr_range" {
+  default = "0.0.0.0/0"
+}
+
+variable "endpoint_policies" {
+  type = "list"
+  default = [
+    "storage.buckets.create",
+    "storage.buckets.delete",
+    "storage.buckets.get",
+    "storage.buckets.getIamPolicy",
+    "storage.buckets.list",
+    "storage.buckets.setIamPolicy",
+    "storage.buckets.update",
+    "storage.objects.create",
+    "storage.objects.delete",
+    "storage.objects.get",
+    "storage.objects.getIamPolicy",
+    "storage.objects.list",
+    "storage.objects.setIamPolicy",
+    "storage.objects.update",
+    "compute.autoscalers.get",
+    "compute.instances.get",
+    "compute.healthChecks.get",
+    "compute.addresses.create",
+    "compute.addresses.delete",
+    "compute.firewalls.create",
+    "compute.firewalls.delete",
+    "compute.firewalls.get",
+    "compute.firewalls.list",
+    "compute.images.create",
+    "compute.images.delete",
+    "compute.images.get",
+    "compute.images.list",
+    "compute.images.setLabels",
+    "compute.networks.get",
+    "compute.networks.create",
+    "compute.networks.delete",
+    "compute.networks.updatePolicy",
+    "compute.projects.setCommonInstanceMetadata",
+    "compute.projects.setDefaultServiceAccount",
+    "compute.subnetworks.create",
+    "compute.subnetworks.delete"
+  ]
+}
+
+variable "endpoint_roles" {
+  type = "list"
+  default = [
+    "roles/iam.serviceAccountUser",
+    "roles/iam.serviceAccountAdmin",
+    "roles/storage.admin",
+    "roles/dataproc.editor",
+    "roles/resourcemanager.projectIamAdmin",
+    "roles/iam.roleAdmin",
+    "roles/compute.instanceAdmin",
+    "roles/bigquery.dataViewer",
+    "roles/bigquery.jobUser"
+  ]
+}
+
+variable "path_to_pub_key" {
+  default = ""
+}
+
+variable "product" {
+  default = "dlab"
+}
+
+variable "additional_tag" {
+  default = "product:dlab"
+}
+
+variable "ldap_host" {}
+
+variable "ldap_dn" {}
+
+variable "ldap_user" {}
+
+variable "ldap_bind_creds" {}
+
+variable "ldap_users_group" {}
+
+variable "billing_enable" {}
+
+variable "billing_dataset_name" {}
+
+variable "mongo_password" {}
+
+variable "mongo_host" {}
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/provisioning.py b/infrastructure-provisioning/terraform/gcp/endpoint/provisioning.py
new file mode 100644
index 0000000..ac36747
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/provisioning.py
@@ -0,0 +1,632 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+import argparse
+import logging
+import sys
+import time
+import traceback
+from fabric import Connection
+from patchwork.files import exists
+
+conn = None
+args = None
+java_home = None
+
+
+def create_user():
+    initial_user = 'ubuntu'
+    sudo_group = 'sudo'
+    with Connection(host=args.hostname, user=initial_user,
+                    connect_kwargs={'key_filename': args.pkey}) as conn:
+        try:
+            if not exists(conn,
+                          '/home/{}/.ssh_user_ensured'.format(initial_user)):
+                conn.sudo('useradd -m -G {1} -s /bin/bash {0}'
+                          .format(args.os_user, sudo_group))
+                conn.sudo(
+                    'bash -c \'echo "{} ALL = NOPASSWD:ALL" >> /etc/sudoers\''.format(args.os_user))
+                conn.sudo('mkdir /home/{}/.ssh'.format(args.os_user))
+                conn.sudo('chown -R {0}:{0} /home/{1}/.ssh/'
+                          .format(initial_user, args.os_user))
+                conn.sudo('cat /home/{0}/.ssh/authorized_keys > '
+                          '/home/{1}/.ssh/authorized_keys'
+                          .format(initial_user, args.os_user))
+                conn.sudo(
+                    'chown -R {0}:{0} /home/{0}/.ssh/'.format(args.os_user))
+                conn.sudo('chmod 700 /home/{0}/.ssh'.format(args.os_user))
+                conn.sudo('chmod 600 /home/{0}/.ssh/authorized_keys'
+                          .format(args.os_user))
+                conn.sudo(
+                    'touch /home/{}/.ssh_user_ensured'.format(initial_user))
+        except Exception as err:
+            logging.error('Failed to create new os_user: %s', str(err))
+            sys.exit(1)
+
+
+def copy_keys():
+    try:
+        conn.put(args.pkey, '/home/{0}/keys/'.format(args.os_user))
+        conn.sudo('chown -R {0}:{0} /home/{0}/keys'.format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to copy admin key: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_dir_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir'.format(args.os_user)):
+            conn.sudo('mkdir /home/{}/.ensure_dir'.format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to create ~/.ensure_dir/: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_logs_endpoint():
+    log_root_dir = "/var/opt/dlab/log"
+    supervisor_log_file = "/var/log/application/provision-service.log"
+    try:
+        if not exists(conn, '/home/' + args.os_user + '/.ensure_dir/logs_ensured'):
+            if not exists(conn, args.dlab_path):
+                conn.sudo("mkdir -p " + args.dlab_path)
+                conn.sudo("chown -R " + args.os_user + ' ' + args.dlab_path)
+            if not exists(conn, log_root_dir):
+                conn.sudo('mkdir -p ' + log_root_dir + '/provisioning')
+                conn.sudo('touch ' + log_root_dir + '/provisioning/provisioning.log')
+            if not exists(conn, supervisor_log_file):
+                conn.sudo("mkdir -p /var/log/application")
+                conn.sudo("touch " + supervisor_log_file)
+            conn.sudo("chown -R {0} {1}".format(args.os_user, log_root_dir))
+            conn.sudo('touch /home/' + args.os_user + '/.ensure_dir/logs_ensured')
+    except Exception as err:
+        logging.error('Failed to configure logs and dlab directory: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_jre_jdk_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir/jre_jdk_ensured'.format(args.os_user)):
+            conn.sudo('apt-get install -y openjdk-8-jre-headless')
+            conn.sudo('apt-get install -y openjdk-8-jdk-headless')
+            conn.sudo('touch /home/{}/.ensure_dir/jre_jdk_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to install Java JDK: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_supervisor_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir/superv_ensured'.format(args.os_user)):
+            conn.sudo('apt-get -y install supervisor')
+            conn.sudo('update-rc.d supervisor defaults')
+            conn.sudo('update-rc.d supervisor enable')
+            conn.sudo('touch /home/{}/.ensure_dir/superv_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to install Supervisor: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_docker_endpoint():
+    try:
+        if not exists(conn, '/home/{}/.ensure_dir/docker_ensured'.format(args.os_user)):
+            conn.sudo("bash -c "
+                      "'curl -fsSL https://download.docker.com/linux/ubuntu/gpg"
+                      " | apt-key add -'")
+            conn.sudo('add-apt-repository "deb [arch=amd64] '
+                      'https://download.docker.com/linux/ubuntu '
+                      '$(lsb_release -cs) stable"')
+            conn.sudo('apt-get update')
+            conn.sudo('apt-cache policy docker-ce')
+            conn.sudo('apt-get install -y docker-ce={}'
+                      .format(args.docker_version))
+            if not exists(conn, '{}/tmp'.format(args.dlab_path)):
+                conn.run('mkdir -p {}/tmp'.format(args.dlab_path))
+            conn.put('./daemon.json',
+                     '{}/tmp/daemon.json'.format(args.dlab_path))
+            conn.sudo('sed -i "s|REPOSITORY|{}:{}|g" {}/tmp/daemon.json'
+                      .format(args.repository_address,
+                              args.repository_port,
+                              args.dlab_path))
+            if args.cloud_provider == "aws":
+                dns_ip_resolve = (conn.run("systemd-resolve --status "
+                                           "| grep -A 5 'Current Scopes: DNS' "
+                                           "| grep 'DNS Servers:' "
+                                           "| awk '{print $3}'")
+                                  .stdout.rstrip("\n\r"))
+                conn.sudo('sed -i "s|DNS_IP_RESOLVE|\"dns\": [{0}],|g" {1}/tmp/daemon.json'
+                          .format(dns_ip_resolve, args.dlab_path))
+            elif args.cloud_provider == "gcp":
+                dns_ip_resolve = ""
+                conn.sudo('sed -i "s|DNS_IP_RESOLVE||g" {1}/tmp/daemon.json'
+                          .format(dns_ip_resolve, args.dlab_path))
+            conn.sudo('mv {}/tmp/daemon.json /etc/docker'
+                      .format(args.dlab_path))
+            conn.sudo('usermod -a -G docker ' + args.os_user)
+            conn.sudo('update-rc.d docker defaults')
+            conn.sudo('update-rc.d docker enable')
+            conn.sudo('service docker restart')
+            conn.sudo('touch /home/{}/.ensure_dir/docker_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to install Docker: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def create_key_dir_endpoint():
+    try:
+        if not exists(conn, '/home/{}/keys'.format(args.os_user)):
+            conn.run('mkdir /home/{}/keys'.format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to create keys directory ~/keys: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def configure_keystore_endpoint(os_user):
+    try:
+        # TEMPORARILY COMMENTED OUT!!!
+        if args.cloud_provider == "aws":
+            conn.sudo('apt-get install -y awscli')
+            if not exists(conn, '/home/' + args.os_user + '/keys/endpoint.keystore.jks'):
+                conn.sudo('aws s3 cp s3://{0}/dlab/certs/endpoint/endpoint.keystore.jks '
+                          '/home/{1}/keys/endpoint.keystore.jks'
+                          .format(args.ssn_bucket_name, args.os_user))
+            if not exists(conn, '/home/' + args.os_user + '/keys/dlab.crt'):
+                conn.sudo('aws s3 cp s3://{0}/dlab/certs/endpoint/endpoint.crt'
+                          ' /home/{1}/keys/endpoint.crt'.format(args.ssn_bucket_name, args.os_user))
+        #     if not exists(conn, '/home/' + args.os_user + '/keys/ssn.crt'):
+        #         conn.sudo('aws s3 cp '
+        #                   's3://{0}/dlab/certs/ssn/ssn.crt /home/{1}/keys/ssn.crt'
+        #                   .format(args.ssn_bucket_name, args.os_user))
+        elif args.cloud_provider == "gcp":
+            if not exists(conn, '/home/' + args.os_user + '/keys/endpoint.keystore.jks'):
+                conn.sudo('gsutil -m cp -r gs://{0}/dlab/certs/endpoint/endpoint.keystore.jks '
+                          '/home/{1}/keys/'
+                          .format(args.ssn_bucket_name, args.os_user))
+            if not exists(conn, '/home/' + args.os_user + '/keys/dlab.crt'):
+                conn.sudo('gsutil -m cp -r gs://{0}/dlab/certs/endpoint/endpoint.crt'
+                          ' /home/{1}/keys/'.format(args.ssn_bucket_name, args.os_user))
+        #     if not exists(conn, '/home/' + args.os_user + '/keys/ssn.crt'):
+        #         conn.sudo('gsutil -m cp -r '
+        #                   'gs://{0}/dlab/certs/ssn/ssn.crt /home/{1}/keys/'
+        #                   .format(args.ssn_bucket_name, args.os_user))
+        if not exists(conn, '/home/' + args.os_user + '/.ensure_dir/cert_imported'):
+            conn.sudo('keytool -importcert -trustcacerts -alias dlab -file /home/{0}/keys/endpoint.crt -noprompt \
+                 -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_home))
+        #     conn.sudo('keytool -importcert -trustcacerts -file /home/{0}/keys/ssn.crt -noprompt \
+        #          -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_home))
+            conn.sudo('touch /home/' + args.os_user + '/.ensure_dir/cert_imported')
+        print("Certificates are imported.")
+    except Exception as err:
+        logging.error('Failed to configure Keystore certificates: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def configure_supervisor_endpoint():
+    try:
+        if not exists(conn,
+                      '/home/{}/.ensure_dir/configure_supervisor_ensured'.format(args.os_user)):
+            supervisor_conf = '/etc/supervisor/conf.d/supervisor_svc.conf'
+            if not exists(conn, '{}/tmp'.format(args.dlab_path)):
+                conn.run('mkdir -p {}/tmp'.format(args.dlab_path))
+            conn.put('./supervisor_svc.conf',
+                     '{}/tmp/supervisor_svc.conf'.format(args.dlab_path))
+            dlab_conf_dir = '{}/conf/'.format(args.dlab_path)
+            if not exists(conn, dlab_conf_dir):
+                conn.run('mkdir -p {}'.format(dlab_conf_dir))
+            web_path = '{}/webapp'.format(args.dlab_path)
+            if not exists(conn, web_path):
+                conn.run('mkdir -p {}'.format(web_path))
+            conn.sudo('sed -i "s|OS_USR|{}|g" {}/tmp/supervisor_svc.conf'
+                      .format(args.os_user, args.dlab_path))
+            conn.sudo('sed -i "s|WEB_CONF|{}|g" {}/tmp/supervisor_svc.conf'
+                      .format(dlab_conf_dir, args.dlab_path))
+            conn.sudo('sed -i \'s=WEB_APP_DIR={}=\' {}/tmp/supervisor_svc.conf'
+                      .format(web_path, args.dlab_path))
+            conn.sudo('cp {}/tmp/supervisor_svc.conf {}'
+                      .format(args.dlab_path, supervisor_conf))
+            conn.put('./provisioning.yml', '{}provisioning.yml'
+                     .format(dlab_conf_dir))
+            conn.sudo('sed -i "s|KEYNAME|{}|g" {}provisioning.yml'
+                      .format(args.key_name, dlab_conf_dir))
+            conn.sudo('sed -i "s|KEYSTORE_PASSWORD|{}|g" {}provisioning.yml'
+                      .format(args.endpoint_keystore_password, dlab_conf_dir))
+            conn.sudo('sed -i "s|JRE_HOME|{}|g" {}provisioning.yml'
+                      .format(java_home, dlab_conf_dir))
+            conn.sudo('sed -i "s|CLOUD_PROVIDER|{}|g" {}provisioning.yml'
+                      .format(args.cloud_provider, dlab_conf_dir))
+
+            conn.sudo('sed -i "s|MONGO_HOST|{}|g" {}provisioning.yml'
+                      .format(args.mongo_host, dlab_conf_dir))
+            conn.sudo('sed -i "s|MONGO_PORT|{}|g" {}provisioning.yml'
+                      .format(args.mongo_port, dlab_conf_dir))
+            conn.sudo('sed -i "s|SS_HOST|{}|g" {}provisioning.yml'
+                      .format(args.ss_host, dlab_conf_dir))
+            conn.sudo('sed -i "s|SS_PORT|{}|g" {}provisioning.yml'
+                      .format(args.ss_port, dlab_conf_dir))
+            conn.sudo('sed -i "s|KEYCLOACK_HOST|{}|g" {}provisioning.yml'
+                      .format(args.keycloack_host, dlab_conf_dir))
+
+            conn.sudo('sed -i "s|CLIENT_SECRET|{}|g" {}provisioning.yml'
+                      .format(args.keycloak_client_secret, dlab_conf_dir))
+            # conn.sudo('sed -i "s|MONGO_PASSWORD|{}|g" {}provisioning.yml'
+            #           .format(args.mongo_password, dlab_conf_dir))
+            conn.sudo('sed -i "s|CONF_OS|{}|g" {}provisioning.yml'
+                      .format(args.conf_os, dlab_conf_dir))
+            conn.sudo('sed -i "s|SERVICE_BASE_NAME|{}|g" {}provisioning.yml'
+                      .format(args.service_base_name, dlab_conf_dir))
+            conn.sudo('sed -i "s|EDGE_INSTANCE_SIZE|{}|g" {}provisioning.yml'
+                      .format(args.edge_instence_size, dlab_conf_dir))
+            conn.sudo('sed -i "s|SUBNET_ID|{}|g" {}provisioning.yml'
+                      .format(args.subnet_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|REGION|{}|g" {}provisioning.yml'
+                      .format(args.region, dlab_conf_dir))
+            conn.sudo('sed -i "s|ZONE|{}|g" {}provisioning.yml'
+                      .format(args.zone, dlab_conf_dir))
+            conn.sudo('sed -i "s|TAG_RESOURCE_ID|{}|g" {}provisioning.yml'
+                      .format(args.tag_resource_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|SG_IDS|{}|g" {}provisioning.yml'
+                      .format(args.sg_ids, dlab_conf_dir))
+            conn.sudo('sed -i "s|SSN_INSTANCE_SIZE|{}|g" {}provisioning.yml'
+                      .format(args.ssn_instance_size, dlab_conf_dir))
+            conn.sudo('sed -i "s|VPC2_ID|{}|g" {}provisioning.yml'
+                      .format(args.vpc2_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|SUBNET2_ID|{}|g" {}provisioning.yml'
+                      .format(args.subnet2_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|CONF_KEY_DIR|{}|g" {}provisioning.yml'
+                      .format(args.conf_key_dir, dlab_conf_dir))
+            conn.sudo('sed -i "s|VPC_ID|{}|g" {}provisioning.yml'
+                      .format(args.vpc_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|PEERING_ID|{}|g" {}provisioning.yml'
+                      .format(args.peering_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|AZURE_RESOURCE_GROUP_NAME|{}|g" {}provisioning.yml'
+                      .format(args.azure_resource_group_name, dlab_conf_dir))
+            conn.sudo('sed -i "s|AZURE_SSN_STORAGE_ACCOUNT_TAG|{}|g" {}provisioning.yml'
+                      .format(args.azure_ssn_storage_account_tag, dlab_conf_dir))
+            conn.sudo('sed -i "s|AZURE_SHARED_STORAGE_ACCOUNT_TAG|{}|g" {}provisioning.yml'
+                      .format(args.azure_shared_storage_account_tag, dlab_conf_dir))
+            conn.sudo('sed -i "s|AZURE_DATALAKE_TAG|{}|g" {}provisioning.yml'
+                      .format(args.azure_datalake_tag, dlab_conf_dir))
+            conn.sudo('sed -i "s|AZURE_CLIENT_ID|{}|g" {}provisioning.yml'
+                      .format(args.azure_client_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|GCP_PROJECT_ID|{}|g" {}provisioning.yml'
+                      .format(args.gcp_project_id, dlab_conf_dir))
+            conn.sudo('sed -i "s|LDAP_HOST|{}|g" {}provisioning.yml'
+                      .format(args.ldap_host, dlab_conf_dir))
+            conn.sudo('sed -i "s|LDAP_DN|{}|g" {}provisioning.yml'
+                      .format(args.ldap_dn, dlab_conf_dir))
+            conn.sudo('sed -i "s|LDAP_OU|{}|g" {}provisioning.yml'
+                      .format(args.ldap_ou, dlab_conf_dir))
+            conn.sudo('sed -i "s|LDAP_USER_NAME|{}|g" {}provisioning.yml'
+                      .format(args.ldap_user_name, dlab_conf_dir))
+            conn.sudo('sed -i "s|LDAP_USER_PASSWORD|{}|g" {}provisioning.yml'
+                      .format(args.ldap_user_password, dlab_conf_dir))
+            conn.sudo('touch /home/{}/.ensure_dir/configure_supervisor_ensured'
+                      .format(args.os_user))
+    except Exception as err:
+        logging.error('Failed to configure Supervisor: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def ensure_jar_endpoint():
+    try:
+        ensure_file = ('/home/{}/.ensure_dir/backend_jar_ensured'
+                       .format(args.os_user))
+        if not exists(conn, ensure_file):
+            web_path = '{}/webapp'.format(args.dlab_path)
+            if not exists(conn, web_path):
+                conn.run('mkdir -p {}'.format(web_path))
+            if args.cloud_provider == "aws":
+                conn.run('wget -P {}  --user={} --password={} '
+                         'https://{}/repository/packages/aws/provisioning-service-'
+                         '2.1.jar --no-check-certificate'
+                         .format(web_path, args.repository_user,
+                                 args.repository_pass, args.repository_address))
+            elif args.cloud_provider == "gcp":
+                conn.run('wget -P {}  --user={} --password={} '
+                         'https://{}/repository/packages/gcp/provisioning-service-'
+                         '2.1.jar --no-check-certificate'
+                         .format(web_path, args.repository_user,
+                                 args.repository_pass, args.repository_address))
+            conn.run('mv {0}/*.jar {0}/provisioning-service.jar'
+                     .format(web_path))
+            conn.sudo('touch {}'.format(ensure_file))
+    except Exception as err:
+        logging.error('Failed to download provisioning-service jar: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def start_supervisor_endpoint():
+    try:
+        conn.sudo("service supervisor restart")
+    except Exception as err:
+        logging.error('Unable to start Supervisor: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def get_sources():
+    try:
+        conn.run("git clone https://github.com/apache/incubator-dlab.git {0}/sources".format(args.dlab_path))
+        if args.branch_name != "":
+            conn.run("cd {0}/sources && git checkout {1} && cd".format(args.dlab_path, args.branch_name))
+    except Exception as err:
+        logging.error('Failed to download sources: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def pull_docker_images():
+    try:
+        ensure_file = ('/home/{}/.ensure_dir/docker_images_pulled'
+                       .format(args.os_user))
+        if not exists(conn, ensure_file):
+            conn.sudo('docker login -u {} -p {} {}:{}'
+                      .format(args.repository_user,
+                              args.repository_pass,
+                              args.repository_address,
+                              args.repository_port))
+            conn.sudo('docker pull {}:{}/docker.dlab-base-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-edge-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-project-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-jupyter-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-rstudio-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-zeppelin-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-tensor-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-tensor-rstudio-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-deeplearning-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-dataengine-service-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker pull {}:{}/docker.dlab-dataengine-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-base-{} docker.dlab-base'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-edge-{} docker.dlab-edge'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-project-{} docker.dlab-project'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-jupyter-{} docker.dlab-jupyter'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-rstudio-{} docker.dlab-rstudio'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-zeppelin-{} '
+                      'docker.dlab-zeppelin'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-tensor-{} docker.dlab-tensor'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-tensor-rstudio-{} '
+                      'docker.dlab-tensor-rstudio'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-deeplearning-{} '
+                      'docker.dlab-deeplearning'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-dataengine-service-{} '
+                      'docker.dlab-dataengine-service'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker tag {}:{}/docker.dlab-dataengine-{} '
+                      'docker.dlab-dataengine'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-base-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-edge-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-project-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-jupyter-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-rstudio-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-zeppelin-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-tensor-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-tensor-rstudio-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-deeplearning-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-dataengine-service-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('docker rmi {}:{}/docker.dlab-dataengine-{}'
+                      .format(args.repository_address, args.repository_port, args.cloud_provider))
+            conn.sudo('chown -R {0}:docker /home/{0}/.docker/'
+                      .format(args.os_user))
+            conn.sudo('touch {}'.format(ensure_file))
+    except Exception as err:
+        logging.error('Failed to pull Docker images: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def init_args():
+    global args
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--dlab_path', type=str, default='/opt/dlab')
+    parser.add_argument('--key_name', type=str, default='', help='Name of admin key without .pem extension')
+    parser.add_argument('--endpoint_eip_address', type=str)
+    parser.add_argument('--pkey', type=str, default='')
+    parser.add_argument('--hostname', type=str, default='')
+    parser.add_argument('--os_user', type=str, default='dlab-user')
+    parser.add_argument('--cloud_provider', type=str, default='')
+
+    parser.add_argument('--mongo_host', type=str, default='MONGO_HOST')
+    parser.add_argument('--mongo_port', type=str, default='27017')
+    parser.add_argument('--ss_host', type=str, default='')
+    parser.add_argument('--ss_port', type=str, default='8443')
+    parser.add_argument('--keycloack_host', type=str, default='')
+
+    # parser.add_argument('--mongo_password', type=str, default='')
+    parser.add_argument('--repository_address', type=str, default='')
+    parser.add_argument('--repository_port', type=str, default='')
+    parser.add_argument('--repository_user', type=str, default='')
+    parser.add_argument('--repository_pass', type=str, default='')
+    parser.add_argument('--docker_version', type=str,
+                        default='18.06.3~ce~3-0~ubuntu')
+    parser.add_argument('--ssn_bucket_name', type=str, default='')
+    parser.add_argument('--endpoint_keystore_password', type=str, default='')
+    parser.add_argument('--keycloak_client_secret', type=str, default='')
+    parser.add_argument('--branch_name', type=str, default='master')  # change default
+
+    parser.add_argument('--conf_os', type=str, default='debian')
+    parser.add_argument('--service_base_name', type=str, default='')
+    parser.add_argument('--edge_instence_size', type=str, default='')
+    parser.add_argument('--subnet_id', type=str, default='')
+    parser.add_argument('--region', type=str, default='')
+    parser.add_argument('--zone', type=str, default='')
+    parser.add_argument('--tag_resource_id', type=str, default='')
+    parser.add_argument('--sg_ids', type=str, default='')
+    parser.add_argument('--ssn_instance_size', type=str, default='')
+    parser.add_argument('--vpc2_id', type=str, default='')
+    parser.add_argument('--subnet2_id', type=str, default='')
+    parser.add_argument('--conf_key_dir', type=str, default='/root/keys/', help='Should end by symbol /')
+    parser.add_argument('--vpc_id', type=str, default='')
+    parser.add_argument('--peering_id', type=str, default='')
+    parser.add_argument('--azure_resource_group_name', type=str, default='')
+    parser.add_argument('--azure_ssn_storage_account_tag', type=str, default='')
+    parser.add_argument('--azure_shared_storage_account_tag', type=str, default='')
+    parser.add_argument('--azure_datalake_tag', type=str, default='')
+    parser.add_argument('--azure_client_id', type=str, default='')
+    parser.add_argument('--gcp_project_id', type=str, default='')
+    parser.add_argument('--ldap_host', type=str, default='')
+    parser.add_argument('--ldap_dn', type=str, default='')
+    parser.add_argument('--ldap_ou', type=str, default='')
+    parser.add_argument('--ldap_user_name', type=str, default='')
+    parser.add_argument('--ldap_user_password', type=str, default='')
+    args = parser.parse_known_args()[0]
+
+
+def update_system():
+    conn.sudo('apt-get update')
+
+
+def init_dlab_connection(ip=None, user=None,
+                         pkey=None):
+    global conn
+    if not ip:
+        ip = args.hostname
+    if not user:
+        user = args.os_user
+    if not pkey:
+        pkey = args.pkey
+    try:
+        conn = Connection(ip, user, connect_kwargs={'key_filename': pkey})
+    except Exception as err:
+        logging.error('Failed to connect as dlab-user: %s', str(err))
+        traceback.print_exc()
+        sys.exit(1)
+
+
+def set_java_home():
+    global java_home
+    command = ('bash -c "update-alternatives --query java | grep \'Value: \' '
+               '| grep -o \'/.*/jre\'" ')
+    java_home = (conn.sudo(command).stdout.rstrip("\n\r"))
+
+
+def close_connection():
+    global conn
+    conn.close()
+
+
+def start_deploy():
+    global args
+    init_args()
+    if args.hostname == "":
+        args.hostname = args.endpoint_eip_address
+
+    print("Start provisioning of Endpoint.")
+    time.sleep(40)
+
+    print(args)
+    logging.info("Creating dlab-user")
+    create_user()
+
+    init_dlab_connection()
+    update_system()
+
+    logging.info("Configuring ensure dir")
+    ensure_dir_endpoint()
+
+    logging.info("Configuring Logs")
+    ensure_logs_endpoint()
+
+    logging.info("Installing Java")
+    ensure_jre_jdk_endpoint()
+
+    set_java_home()
+
+    logging.info("Installing Supervisor")
+    ensure_supervisor_endpoint()
+
+    logging.info("Installing Docker")
+    ensure_docker_endpoint()
+
+    logging.info("Configuring Supervisor")
+    configure_supervisor_endpoint()
+
+    logging.info("Creating key directory")
+    create_key_dir_endpoint()
+
+    logging.info("Copying admin key")
+    copy_keys()
+
+    logging.info("Configuring certificates")
+    configure_keystore_endpoint(args.os_user)
+
+    logging.info("Ensure jar")
+    ensure_jar_endpoint()
+
+    logging.info("Downloading sources")
+    get_sources()
+
+    logging.info("Pulling docker images")
+    pull_docker_images()
+
+    logging.info("Starting supervisor")
+    start_supervisor_endpoint()
+
+    close_connection()
+    print("Done provisioning of Endpoint.")
+
+
+if __name__ == "__main__":
+    start_deploy()
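
The pull/tag/rmi sequence in pull_docker_images() above repeats the same three
commands for eleven images. A table-driven loop expresses the same behaviour
more compactly; this is a sketch only, assuming the module-level `conn` and
`args` objects defined in the script:

    # Sketch: data-driven equivalent of the pull/tag/rmi block in pull_docker_images().
    images = ['base', 'edge', 'project', 'jupyter', 'rstudio', 'zeppelin',
              'tensor', 'tensor-rstudio', 'deeplearning', 'dataengine-service',
              'dataengine']
    registry = '{}:{}'.format(args.repository_address, args.repository_port)
    for image in images:
        remote = '{}/docker.dlab-{}-{}'.format(registry, image, args.cloud_provider)
        local_tag = 'docker.dlab-{}'.format(image)
        conn.sudo('docker pull {}'.format(remote))               # fetch from registry
        conn.sudo('docker tag {} {}'.format(remote, local_tag))  # short local name
        conn.sudo('docker rmi {}'.format(remote))                # drop remote tag
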
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/provisioning.yml b/infrastructure-provisioning/terraform/gcp/endpoint/provisioning.yml
new file mode 100644
index 0000000..6edb057
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/provisioning.yml
@@ -0,0 +1,179 @@
+# *****************************************************************************
+#
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+#
+# ******************************************************************************
+
+<#assign LOG_ROOT_DIR="/var/opt/dlab/log">
+<#assign KEYS_DIR="/home/${sys['user.name']}/keys">
+<#assign KEY_STORE_PATH="${KEYS_DIR}/endpoint.keystore.jks">
+<#assign KEY_STORE_PASSWORD="KEYSTORE_PASSWORD">
+<#assign TRUST_STORE_PATH="JRE_HOME/lib/security/cacerts">
+<#assign TRUST_STORE_PASSWORD="changeit">
+
+# Available options are aws, azure, gcp
+<#assign CLOUD_TYPE="CLOUD_PROVIDER">
+cloudProvider: ${CLOUD_TYPE}
+
+# Switch developer mode on/off here
+<#assign DEV_MODE="false">
+devMode: ${DEV_MODE}
+
+
+mongo:
+  host: MONGO_HOST
+  port: MONGO_PORT
+  username: admin
+  password: MONGO_PASSWORD
+  database: dlabdb
+
+selfService:
+  protocol: https
+  host: SS_HOST
+  port: SS_PORT
+  jerseyClient:
+    timeout: 3s
+    connectionTimeout: 3s
+
+securityService:
+  protocol: https
+  host: DOESNT_MATTER
+  port: 8090
+  jerseyClient:
+    timeout: 20s
+    connectionTimeout: 20s
+
+
+provisioningService:
+  protocol: https
+  host: localhost
+  port: 8084
+  jerseyClient:
+    timeout: 3s
+    connectionTimeout: 3s
+
+# Log out user on inactivity
+inactiveUserTimeoutMillSec: 7200000
+
+backupScriptPath: /opt/dlab/tmp/backup.py
+backupDirectory: /opt/dlab/tmp/result
+keyDirectory: ${KEYS_DIR}
+responseDirectory: /opt/dlab/tmp
+handlerDirectory: /opt/dlab/handlers
+dockerLogDirectory: ${LOG_ROOT_DIR}
+warmupPollTimeout: 2m
+resourceStatusPollTimeout: 300m
+keyLoaderPollTimeout: 30m
+requestEnvStatusTimeout: 50s
+adminKey: KEYNAME
+edgeImage: docker.dlab-edge
+fileLengthCheckDelay: 500ms
+
+<#if CLOUD_TYPE == "aws">
+emrEC2RoleDefault: EMR_EC2_DefaultRole
+emrServiceRoleDefault: EMR_DefaultRole
+</#if>
+
+processMaxThreadsPerJvm: 50
+processMaxThreadsPerUser: 5
+processTimeout: 180m
+
+handlersPersistenceEnabled: true
+
+server:
+  requestLog:
+    appenders:
+      - type: file
+        currentLogFilename: ${LOG_ROOT_DIR}/provisioning/request-provisioning.log
+        archive: true
+        archivedLogFilenamePattern: ${LOG_ROOT_DIR}/provisioning/request-provisioning-%d{yyyy-MM-dd}.log.gz
+        archivedFileCount: 10
+  applicationConnectors:
+#    - type: http
+    - type: https
+      port: 8084
+      certAlias: dlab
+      validateCerts: true
+      keyStorePath: ${KEY_STORE_PATH}
+      keyStorePassword: ${KEY_STORE_PASSWORD}
+      trustStorePath: ${TRUST_STORE_PATH}
+      trustStorePassword: ${TRUST_STORE_PASSWORD}
+  adminConnectors:
+#    - type: http
+    - type: https
+      port: 8085
+      certAlias: dlab
+      validateCerts: true
+      keyStorePath: ${KEY_STORE_PATH}
+      keyStorePassword: ${KEY_STORE_PASSWORD}
+      trustStorePath: ${TRUST_STORE_PATH}
+      trustStorePassword: ${TRUST_STORE_PASSWORD}
+
+logging:
+  level: INFO
+  loggers:
+    com.epam: TRACE
+    com.aegisql: INFO
+  appenders:
+<#if DEV_MODE == "true">
+    - type: console
+</#if>
+    - type: file
+      currentLogFilename: ${LOG_ROOT_DIR}/provisioning/provisioning.log
+      archive: true
+      archivedLogFilenamePattern: ${LOG_ROOT_DIR}/provisioning/provisioning-%d{yyyy-MM-dd}.log.gz
+      archivedFileCount: 10
+
+keycloakConfiguration:
+  realm: dlab
+  bearer-only: true
+  auth-server-url: http://KEYCLOACK_HOST/auth
+  ssl-required: none
+  register-node-at-startup: true
+  register-node-period: 600
+  resource: dlab-ui
+  credentials:
+    secret: CLIENT_SECRET
+
+cloudProperties:
+  os: CONF_OS
+  serviceBaseName: SERVICE_BASE_NAME
+  edgeInstanceSize: EDGE_INSTANCE_SIZE
+  subnetId: SUBNET_ID
+  region: REGION
+  zone: ZONE
+  confTagResourceId: TAG_RESOURCE_ID
+  securityGroupIds: SG_IDS
+  ssnInstanceSize: SSN_INSTANCE_SIZE
+  notebookVpcId: VPC2_ID
+  notebookSubnetId: SUBNET2_ID
+  confKeyDir: CONF_KEY_DIR
+  vpcId: VPC_ID
+  peeringId: PEERING_ID
+  azureResourceGroupName: AZURE_RESOURCE_GROUP_NAME
+  ssnStorageAccountTagName: AZURE_SSN_STORAGE_ACCOUNT_TAG
+  sharedStorageAccountTagName: AZURE_SHARED_STORAGE_ACCOUNT_TAG
+  datalakeTagName: AZURE_DATALAKE_TAG
+  azureClientId: AZURE_CLIENT_ID
+  gcpProjectId: GCP_PROJECT_ID
+  ldap:
+    host: LDAP_HOST
+    dn: LDAP_DN
+    ou: LDAP_OU
+    user: LDAP_USER_NAME
+    password: LDAP_USER_PASSWORD
\ No newline at end of file
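
The CAPITALIZED tokens in provisioning.yml (MONGO_HOST, SS_PORT, KEYCLOACK_HOST,
CLIENT_SECRET, and so on) mirror the arguments of the deploy script above and are
placeholders to be filled in at deployment time. A hypothetical illustration of
that substitution, reusing the script's `conn` and `args` objects and an assumed
target path (the actual substitution code is not part of this patch):

    # Hypothetical: fill in the placeholders from the deploy-script arguments.
    substitutions = {
        'MONGO_HOST': args.mongo_host,
        'MONGO_PORT': args.mongo_port,
        'SS_HOST': args.ss_host,
        'SS_PORT': args.ss_port,
        'KEYCLOACK_HOST': args.keycloack_host,
        'CLIENT_SECRET': args.keycloak_client_secret,
        'CLOUD_PROVIDER': args.cloud_provider,
    }
    config_path = '/opt/dlab/conf/provisioning.yml'  # assumed location
    for placeholder, value in substitutions.items():
        conn.sudo('sed -i "s|{}|{}|g" {}'.format(placeholder, value, config_path))
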
diff --git a/infrastructure-provisioning/terraform/gcp/main/main.tf b/infrastructure-provisioning/terraform/gcp/main/main.tf
new file mode 100644
index 0000000..4393f59
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/main/main.tf
@@ -0,0 +1,109 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+provider "google" {
+  credentials = "${var.credentials}"
+  project     = "${var.project_name}"
+  region      = "${var.region_var}"
+  zone        = "${var.zone_var}"
+}
+
+module "common" {
+  source            = "../modules/common"
+  project_tag       = "${var.project_tag}"
+  endpoint_tag      = "${var.endpoint_tag}"
+  user_tag          = "${var.user_tag}"
+  custom_tag        = "${var.custom_tag}"
+  product           = "${var.product_name}"
+  region            = "${var.region_var}"
+  vpc_name          = "${var.vpc_name}"
+  fw_ingress        = "${var.fw_ingress}"
+  fw_egress_public  = "${var.fw_egress_public}"
+  fw_egress_private = "${var.fw_egress_private}"
+  network_tag       = "${var.network_tag}"
+  cidr_range        = "${var.cidr_range}"
+  traefik_cidr      = "${var.traefik_cidr}"
+}
+
+module "notebook" {
+  source          = "../modules/notebook"
+  project_tag     = "${var.project_tag}"
+  endpoint_tag    = "${var.endpoint_tag}"
+  user_tag        = "${var.user_tag}"
+  custom_tag      = "${var.custom_tag}"
+  product         = "${var.product_name}"
+  notebook_name   = "${var.notebook_name}"
+  zone_var        = "${var.zone_var}"
+  vpc_name        = "${var.vpc_name}"
+  subnet_name     = "${var.subnet_name}"
+  network_tag     = "${var.network_tag}"
+  sa_email        = "${var.sa_email}"
+  ami             = "${var.ami}"
+  machine_type    = "${var.machine_type}"
+  ssh_key         = "${var.ssh_key}"
+  gpu_accelerator = "${var.gpu_accelerator}"
+}
+
+module "data_engine" {
+  source          = "../modules/data_engine"
+  project_tag     = "${var.project_tag}"
+  endpoint_tag    = "${var.endpoint_tag}"
+  user_tag        = "${var.user_tag}"
+  custom_tag      = "${var.custom_tag}"
+  product         = "${var.product_name}"
+  notebook_name   = "${var.notebook_name}"
+  zone_var        = "${var.zone_var}"
+  vpc_name        = "${var.vpc_name}"
+  subnet_name     = "${var.subnet_name}"
+  network_tag     = "${var.network_tag}"
+  sa_email        = "${var.sa_email}"
+  ami             = "${var.ami}"
+  ssh_key         = "${var.ssh_key}"
+  gpu_accelerator = "${var.gpu_accelerator}"
+  cluster_name    = "${var.cluster_name}"
+  total_count     = "${var.total_count}"
+  master_shape    = "${var.master_shape}"
+  slave_shape     = "${var.slave_shape}"
+}
+
+module "dataproc" {
+  source            = "../modules/dataproc"
+  region            = "${var.region_var}"
+  project_tag       = "${var.project_tag}"
+  endpoint_tag      = "${var.endpoint_tag}"
+  user_tag          = "${var.user_tag}"
+  custom_tag        = "${var.custom_tag}"
+  product           = "${var.product_name}"
+  notebook_name     = "${var.notebook_name}"
+  zone_var          = "${var.zone_var}"
+  vpc_name          = "${var.vpc_name}"
+  subnet_name       = "${var.subnet_name}"
+  network_tag       = "${var.network_tag}"
+  sa_email          = "${var.sa_email}"
+  ami               = "${var.ami}"
+  ssh_key           = "${var.ssh_key}"
+  gpu_accelerator   = "${var.gpu_accelerator}"
+  cluster_name      = "${var.cluster_name}"
+  total_count       = "${var.total_count}"
+  master_shape      = "${var.master_shape}"
+  slave_shape       = "${var.slave_shape}"
+  preemptible_count = "${var.preemptible_count}"
+}
\ No newline at end of file
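
The root module above declares no defaults for its variables (see variables.tf
below), so every value must be supplied at apply time. A sketch of driving it
programmatically, assuming the Terraform CLI is on PATH, `terraform init` has
already run, and showing only a subset of the variables with placeholder values:

    import subprocess

    # Sketch: apply the GCP main module; variable names come from variables.tf.
    tf_vars = {
        'credentials': '/path/to/credentials.json',  # placeholder values
        'project_name': 'my-gcp-project',
        'region_var': 'us-central1',
        'zone_var': 'us-central1-a',
        'project_tag': 'dlab-project',
    }
    cmd = ['terraform', 'apply', '-auto-approve']
    cmd += ['-var={}={}'.format(k, v) for k, v in tf_vars.items()]
    subprocess.run(cmd, cwd='infrastructure-provisioning/terraform/gcp/main',
                   check=True)
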
diff --git a/infrastructure-provisioning/terraform/gcp/main/variables.tf b/infrastructure-provisioning/terraform/gcp/main/variables.tf
new file mode 100644
index 0000000..3c2feb0
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/main/variables.tf
@@ -0,0 +1,76 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "credentials" {}
+
+variable "project_name" {}
+
+variable "project_tag" {}
+
+variable "endpoint_tag" {}
+
+variable "user_tag" {}
+
+variable "custom_tag" {}
+
+variable "notebook_name" {}
+
+variable "region_var" {}
+
+variable "zone_var" {}
+
+variable "product_name" {}
+
+variable "vpc_name" {}
+
+variable "subnet_name" {}
+
+variable "fw_ingress" {}
+
+variable "fw_egress_public" {}
+
+variable "fw_egress_private" {}
+
+variable "network_tag" {}
+
+variable "sa_email" {}
+
+variable "cidr_range" {}
+
+variable "traefik_cidr" {}
+
+variable "ami" {}
+
+variable "machine_type" {}
+
+variable "ssh_key" {}
+
+variable "gpu_accelerator" {}
+
+variable "cluster_name" {}
+
+variable "total_count" {}
+
+variable "master_shape" {}
+
+variable "slave_shape" {}
+
+variable "preemptible_count" {}
diff --git a/infrastructure-provisioning/terraform/gcp/modules/common/iam.tf b/infrastructure-provisioning/terraform/gcp/modules/common/iam.tf
new file mode 100644
index 0000000..bd65eb9
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/modules/common/iam.tf
@@ -0,0 +1,57 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  service_name = "${var.project_tag}-ps-sa"
+  role_name    = "${var.project_tag}-ps-role"
+}
+
+resource "google_service_account" "ps_sa" {
+  #Create service account for notebooks and computational resources
+  account_id   = "${var.project_tag}-ps-sa"
+  display_name = "${var.project_tag}-ps-sa"
+}
+
+resource "google_service_account_key" "ps_sa_key" {
+  #Create service account key
+  depends_on         = ["google_project_iam_member.iam"]
+  service_account_id = google_service_account.ps_sa.name
+}
+
+resource "google_project_iam_custom_role" "ps-custom-role" {
+  #Create custom role for ps_sa
+  role_id     = "${replace("${var.project_tag}-ps-role", "-", "_")}"
+  title       = "${var.project_tag}-ps-role"
+  permissions = "${var.ps_policy}"
+}
+
+resource "google_project_iam_member" "role_for_member" {
+  # Grant the custom role to ps_sa
+  member = "serviceAccount:${google_service_account.ps_sa.email}"
+  role   = "${google_project_iam_custom_role.ps-custom-role.id}"
+}
+
+resource "google_project_iam_member" "iam" {
+  # Grant the other predefined roles to ps_sa
+  count  = "${length(var.ps_roles)}"
+  member = "serviceAccount:${google_service_account.ps_sa.email}"
+  role   = "${element(var.ps_roles, count.index)}"
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/modules/common/network.tf b/infrastructure-provisioning/terraform/gcp/modules/common/network.tf
new file mode 100644
index 0000000..cf3d294
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/modules/common/network.tf
@@ -0,0 +1,60 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+resource "google_compute_subnetwork" "subnet" {
+  name          = "${var.project_tag}-subnet"
+  ip_cidr_range = "${var.cidr_range}"
+  region        = "${var.region}"
+  network       = "${var.vpc_name}"
+}
+
+resource "google_compute_firewall" "fw_ingress" {
+  name    = "${var.fw_ingress}"
+  network = "${var.vpc_name}"
+  allow {
+    protocol = "all"
+  }
+  target_tags   = ["${var.network_tag}"]
+  source_ranges = ["${var.cidr_range}", "${var.traefik_cidr}"]
+}
+
+resource "google_compute_firewall" "fw_egress_public" {
+  name      = "${var.fw_egress_public}"
+  network   = "${var.vpc_name}"
+  direction = "EGRESS"
+  allow {
+    protocol = "tcp"
+    ports    = ["443"]
+  }
+  target_tags        = ["${var.network_tag}"]
+  destination_ranges = ["0.0.0.0/0"]
+}
+
+resource "google_compute_firewall" "fw_egress_private" {
+  name      = "${var.fw_egress_private}"
+  network   = "${var.vpc_name}"
+  direction = "EGRESS"
+  allow {
+    protocol = "all"
+  }
+  target_tags        = ["${var.network_tag}"]
+  destination_ranges = ["${var.cidr_range}", "${var.traefik_cidr}"]
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/modules/common/variables.tf
similarity index 65%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/modules/common/variables.tf
index 951fdd7..448d373 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/modules/common/variables.tf
@@ -19,22 +19,42 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+variable "project_tag" {}
 
+variable "endpoint_tag" {}
 
-USER root
+variable "user_tag" {}
 
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
+variable "custom_tag" {}
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
+variable "region" {}
 
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+variable "product" {}
+
+variable "vpc_name" {}
+
+variable "fw_ingress" {}
+
+variable "fw_egress_public" {}
+
+variable "fw_egress_private" {}
+
+variable "network_tag" {}
+
+variable "cidr_range" {}
+
+variable "traefik_cidr" {}
+
+variable "ps_roles" {
+  type = "list"
+  default = [
+    "roles/dataproc.worker"
+  ]
+}
+
+variable "ps_policy" {
+  type = "list"
+  default = [
+
+  ]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/modules/data_engine/instance.tf b/infrastructure-provisioning/terraform/gcp/modules/data_engine/instance.tf
new file mode 100644
index 0000000..a185a57
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/modules/data_engine/instance.tf
@@ -0,0 +1,122 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  notebook_name = "${var.project_tag}-nb-${var.notebook_name}"
+  cluster_name  = "${var.project_tag}-de-${var.notebook_name}-${var.cluster_name}"
+}
+
+resource "google_compute_instance" "master" {
+  name         = "${local.cluster_name}-m"
+  machine_type = "${var.master_shape}"
+  tags         = ["${var.network_tag}"]
+  zone         = "${var.zone_var}"
+
+  boot_disk {
+    initialize_params {
+      image = "${var.ami}"
+      size  = 30
+    }
+  }
+
+  labels = {
+    name          = "${local.cluster_name}-m"
+    notebook_name = "${local.notebook_name}"
+    project       = "${var.project_tag}"
+    product       = "${var.product}"
+    type          = "master"
+    user          = "${var.user_tag}"
+  }
+
+  metadata = {
+    ssh-keys = "ubuntu:${file("${var.ssh_key}")}"
+  }
+
+  service_account {
+    email  = "${var.sa_email}"
+    scopes = ["https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute"]
+  }
+
+  network_interface {
+    network    = "${var.vpc_name}"
+    subnetwork = "${var.subnet_name}"
+  }
+
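+  # Attach one GPU when gpu_accelerator is requested; GPU instances cannot
+  # live-migrate, so host maintenance must TERMINATE them (see scheduling below).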
+  guest_accelerator {
+    count = "${var.gpu_accelerator != "false" ? 1 : 0}"
+    type  = "nvidia-tesla-k80"
+  }
+
+  scheduling {
+    on_host_maintenance = "${var.gpu_accelerator != "false" ? "TERMINATE" : "MIGRATE"}"
+  }
+
+}
+
+
+resource "google_compute_instance" "slave" {
+  count        = "${var.total_count - 1}"
+  name         = "${local.cluster_name}-s${count.index + 1}"
+  machine_type = "${var.slave_shape}"
+  tags         = ["${var.network_tag}"]
+  zone         = "${var.zone_var}"
+
+  boot_disk {
+    initialize_params {
+      image = "${var.ami}"
+      size  = 30
+    }
+  }
+
+  labels = {
+    name          = "${local.cluster_name}-s${count.index + 1}"
+    notebook_name = "${local.notebook_name}"
+    project       = "${var.project_tag}"
+    product       = "${var.product}"
+    sbn           = "${var.project_tag}"
+    type          = "slave"
+    user          = "${var.user_tag}"
+  }
+
+  metadata = {
+    ssh-keys = "ubuntu:${file("${var.ssh_key}")}"
+  }
+
+  service_account {
+    email  = "${var.sa_email}"
+    scopes = ["https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute"]
+  }
+
+  network_interface {
+    network    = "${var.vpc_name}"
+    subnetwork = "${var.subnet_name}"
+  }
+
+  guest_accelerator {
+    count = "${var.gpu_accelerator != "false" ? 1 : 0}"
+    type  = "nvidia-tesla-k80"
+  }
+
+  scheduling {
+    on_host_maintenance = "${var.gpu_accelerator != "false" ? "TERMINATE" : "MIGRATE"}"
+  }
+
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/modules/data_engine/variables.tf
similarity index 66%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/modules/data_engine/variables.tf
index 951fdd7..e950ed1 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/modules/data_engine/variables.tf
@@ -19,22 +19,38 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+variable "project_tag" {}
 
+variable "endpoint_tag" {}
 
-USER root
+variable "user_tag" {}
 
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
+variable "custom_tag" {}
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
+variable "product" {}
 
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+variable "notebook_name" {}
+
+variable "zone_var" {}
+
+variable "vpc_name" {}
+
+variable "subnet_name" {}
+
+variable "network_tag" {}
+
+variable "sa_email" {}
+
+variable "ami" {}
+
+variable "ssh_key" {}
+
+variable "gpu_accelerator" {}
+
+variable "cluster_name" {}
+
+variable "total_count" {}
+
+variable "master_shape" {}
+
+variable "slave_shape" {}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/modules/dataproc/instance.tf b/infrastructure-provisioning/terraform/gcp/modules/dataproc/instance.tf
new file mode 100644
index 0000000..1419c56
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/modules/dataproc/instance.tf
@@ -0,0 +1,63 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  dataproc_name = "${var.project_tag}-des-${var.notebook_name}-${var.cluster_name}"
+}
+
+resource "google_dataproc_cluster" "dataproc" {
+    name       = "${local.dataproc_name}"
+    region     = "${var.region}"
+    labels = {
+        computational_name = "${var.cluster_name}"
+        name               = "${local.dataproc_name}"
+        sbn                = "${var.project_tag}"
+        user               = "${var.user_tag}"
+    }
+
+    cluster_config {
+
+        master_config {
+            num_instances     = 1
+            machine_type      = "${var.master_shape}"
+            disk_config {
+                boot_disk_size_gb = 30
+            }
+        }
+
+        worker_config {
+            num_instances     = "${var.total_count - 1}"
+            machine_type      = "${var.slave_shape}"
+            disk_config {
+                boot_disk_size_gb = 30
+            }
+        }
+
+        gce_cluster_config {
+            subnetwork = "${var.subnet_name}"
+            tags    = ["${var.network_tag}"]
+        }
+
+        preemptible_worker_config {
+            num_instances = "${var.preemptible_count}"
+        }
+    }
+  }
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/modules/dataproc/variables.tf
similarity index 64%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/modules/dataproc/variables.tf
index 951fdd7..bac08a2 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/modules/dataproc/variables.tf
@@ -19,22 +19,42 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+variable "region" {}
 
+variable "project_tag" {}
 
-USER root
+variable "endpoint_tag" {}
 
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
+variable "user_tag" {}
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
+variable "custom_tag" {}
 
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+variable "product" {}
+
+variable "notebook_name" {}
+
+variable "zone_var" {}
+
+variable "vpc_name" {}
+
+variable "subnet_name" {}
+
+variable "network_tag" {}
+
+variable "sa_email" {}
+
+variable "ami" {}
+
+variable "ssh_key" {}
+
+variable "gpu_accelerator" {}
+
+variable "cluster_name" {}
+
+variable "total_count" {}
+
+variable "master_shape" {}
+
+variable "slave_shape" {}
+
+variable "preemptible_count" {}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/modules/notebook/instance.tf b/infrastructure-provisioning/terraform/gcp/modules/notebook/instance.tf
new file mode 100644
index 0000000..e89f69b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/modules/notebook/instance.tf
@@ -0,0 +1,86 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  name = "${var.project_tag}-nb-${var.notebook_name}"
+}
+
+resource "google_compute_disk" "secondary" {
+  name = "${local.name}-secondary"
+  zone = "${var.zone_var}"
+  labels = {
+    name    = "${local.name}"
+    product = "${var.product}"
+    project = "${var.project_tag}"
+    user    = "${var.user_tag}"
+  }
+  physical_block_size_bytes = 4096
+  size                      = 30
+}
+
+resource "google_compute_instance" "notebook" {
+  name         = "${local.name}"
+  machine_type = "${var.machine_type}"
+  tags         = ["${var.network_tag}"]
+  zone         = "${var.zone_var}"
+
+  boot_disk {
+    initialize_params {
+      image = "${var.ami}"
+      size  = 12
+    }
+  }
+
+  attached_disk {
+    source = "${google_compute_disk.secondary.self_link}"
+  }
+
+  labels = {
+    name    = "${local.name}"
+    product = "${var.product}"
+    project = "${var.project_tag}"
+    user    = "${var.user_tag}"
+  }
+
+  metadata = {
+    ssh-keys = "ubuntu:${file("${var.ssh_key}")}"
+  }
+
+  service_account {
+    email  = "${var.sa_email}"
+    scopes = ["https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute"]
+  }
+
+  network_interface {
+    network    = "${var.vpc_name}"
+    subnetwork = "${var.subnet_name}"
+  }
+
+  guest_accelerator {
+    count = "${var.gpu_accelerator != "false" ? 1 : 0}"
+    type  = "nvidia-tesla-k80"
+  }
+
+  scheduling {
+    on_host_maintenance = "${var.gpu_accelerator != "false" ? "TERMINATE" : "MIGRATE"}"
+  }
+
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/modules/notebook/variables.tf
similarity index 70%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/modules/notebook/variables.tf
index 951fdd7..bf51d34 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/modules/notebook/variables.tf
@@ -19,22 +19,32 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+variable "project_tag" {}
 
+variable "endpoint_tag" {}
 
-USER root
+variable "user_tag" {}
 
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
+variable "custom_tag" {}
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
+variable "product" {}
 
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+variable "notebook_name" {}
+
+variable "zone_var" {}
+
+variable "vpc_name" {}
+
+variable "subnet_name" {}
+
+variable "network_tag" {}
+
+variable "sa_email" {}
+
+variable "ami" {}
+
+variable "machine_type" {}
+
+variable "ssh_key" {}
+
+variable "gpu_accelerator" {}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/main.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/main.tf
new file mode 100644
index 0000000..6521774
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/main.tf
@@ -0,0 +1,80 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+provider "google" {
+  credentials = file(var.credentials_file_path)
+  project     = var.project_id
+  region      = var.region
+  zone        = var.zone
+}
+
+module "gke_cluster" {
+  source                    = "./modules/gke"
+  additional_tag            = var.additional_tag
+  service_base_name         = var.service_base_name
+  region                    = var.region
+  gke_cluster_version       = var.gke_cluster_version
+  ssn_k8s_workers_count     = var.ssn_k8s_workers_count
+  ssn_k8s_workers_shape     = var.ssn_k8s_workers_shape
+  project_id                = var.project_id
+  service_account_iam_roles = var.service_account_iam_roles
+  vpc_name                  = var.vpc_name
+  subnet_name               = var.subnet_name
+  subnet_cidr               = var.subnet_cidr
+}
+
+module "helm_charts" {
+  source = "./modules/helm_charts"
+  mongo_dbname               = var.mongo_dbname
+  mongo_db_username          = var.mongo_db_username
+  mongo_service_port         = var.mongo_service_port
+  mongo_service_name         = var.mongo_service_name
+  ssn_k8s_alb_dns_name       = var.ssn_k8s_alb_dns_name
+  service_base_name          = var.service_base_name
+  ldap_host                  = var.ldap_host
+  ldap_dn                    = var.ldap_dn
+  ldap_users_group           = var.ldap_users_group
+  ldap_user                  = var.ldap_user
+  ldap_bind_creds            = var.ldap_bind_creds
+  keycloak_user              = var.keycloak_user
+  ldap_usernameAttr          = var.ldap_usernameAttr
+  ldap_rdnAttr               = var.ldap_rdnAttr
+  ldap_uuidAttr              = var.ldap_uuidAttr
+  mysql_db_name              = var.mysql_db_name
+  mysql_user                 = var.mysql_user
+  region                     = var.region
+  mongo_image_tag            = var.mongo_image_tag
+  mongo_node_port            = var.mongo_node_port
+  gke_cluster_name           = module.gke_cluster.gke_cluster_name
+  big_query_dataset          = var.big_query_dataset
+  env_os                     = var.env_os
+  namespace_name             = var.namespace_name
+  credentials_file_path      = var.credentials_file_path
+  project_id                 = var.project_id
+  custom_certs_enabled       = var.custom_certs_enabled
+  custom_cert_path           = var.custom_cert_path
+  custom_certs_host          = var.custom_certs_host
+  custom_key_path            = var.custom_key_path
+  mysql_disk_size            = var.mysql_disk_size
+  domain                     = var.domain
+  keycloak_realm_name        = var.keycloak_realm_name
+  keycloak_client_id         = var.keycloak_client_id
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/buckets.tf
similarity index 70%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/buckets.tf
index 951fdd7..6aca365 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/buckets.tf
@@ -19,22 +19,16 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+locals {
+  ssn_bucket_name = "${var.service_base_name}-ssn-bucket"
+}
 
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+resource "google_storage_bucket" "ssn_bucket" {
+  name          = local.ssn_bucket_name
+  force_destroy = true
+  labels = {
+    name                              = local.ssn_bucket_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.service_base_name}-tag"    = local.ssn_bucket_name
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
new file mode 100644
index 0000000..21bdf0a
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
@@ -0,0 +1,101 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  additional_tag     = split(":", var.additional_tag)
+  gke_name           = "${var.service_base_name}-k8s-cluster"
+  gke_node_pool_name = "${var.service_base_name}-node-pool"
+}
+
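+# Random 16-character passwords for the SSN and endpoint Java keystores.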
+resource "random_string" "ssn_keystore_password" {
+  length = 16
+  special = false
+}
+
+resource "random_string" "endpoint_keystore_password" {
+  length = 16
+  special = false
+}
+
+resource "google_container_cluster" "ssn_k8s_gke_cluster" {
+  name                     = local.gke_name
+  location                 = var.region
+  remove_default_node_pool = true
+  initial_node_count       = 1
+  min_master_version       = var.gke_cluster_version
+  network                  = data.google_compute_network.ssn_gke_vpc_data.self_link
+  subnetwork               = data.google_compute_subnetwork.ssn_gke_subnet_data.self_link
+  enable_legacy_abac       = true
+  resource_labels          = {
+    name                              = local.gke_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.service_base_name}-tag"    = local.gke_name
+  }
+  master_auth {
+    username = ""
+    password = ""
+
+    client_certificate_config {
+      issue_client_certificate = true
+    }
+  }
+  depends_on = [google_project_iam_member.iam, google_service_account.ssn_k8s_sa,
+                google_service_account_key.nodes_sa_key]
+}
+
+resource "google_container_node_pool" "ssn_k8s_gke_node_pool" {
+  name       = local.gke_node_pool_name
+  location   = var.region
+  cluster    = google_container_cluster.ssn_k8s_gke_cluster.name
+  node_count = var.ssn_k8s_workers_count
+  version    = var.gke_cluster_version
+  depends_on = [google_container_cluster.ssn_k8s_gke_cluster]
+
+  node_config {
+    machine_type = var.ssn_k8s_workers_shape
+    service_account = google_service_account.ssn_k8s_sa.email
+    labels = {
+      name                              = local.gke_node_pool_name
+      "${local.additional_tag[0]}"      = local.additional_tag[1]
+      "${var.service_base_name}-tag"    = local.gke_node_pool_name
+    }
+
+    metadata = {
+      disable-legacy-endpoints = "true"
+    }
+
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/compute",
+      "https://www.googleapis.com/auth/devstorage.read_only",
+      "https://www.googleapis.com/auth/cloud-platform",
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring",
+    ]
+  }
+}
+
+data "google_container_cluster" "ssn_k8s_gke_cluster" {
+  name       = local.gke_name
+  location   = var.region
+  depends_on = [google_container_cluster.ssn_k8s_gke_cluster, google_container_node_pool.ssn_k8s_gke_node_pool]
+}
+
+data "google_client_config" "current" {}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/iam.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/iam.tf
new file mode 100644
index 0000000..3634349
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/iam.tf
@@ -0,0 +1,45 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  service_account_name = "${var.service_base_name}-sa"
+  role_name            = "${var.service_base_name}-role"
+}
+
+resource "google_service_account" "ssn_k8s_sa" {
+  account_id   = local.service_account_name
+  display_name = local.service_account_name
+  project      = var.project_id
+}
+
+resource "google_project_iam_member" "iam" {
+  count   = length(var.service_account_iam_roles)
+  member  = "serviceAccount:${google_service_account.ssn_k8s_sa.email}"
+  project = var.project_id
+  role    = var.service_account_iam_roles[count.index]
+}
+
+
+
+resource "google_service_account_key" "nodes_sa_key" {
+  depends_on         = [google_project_iam_member.iam]
+  service_account_id = google_service_account.ssn_k8s_sa.name
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
new file mode 100644
index 0000000..c3bbdcb
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
@@ -0,0 +1,49 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  ssn_vpc_name      = "${var.service_base_name}-vpc"
+  ssn_subnet_name   = "${var.service_base_name}-subnet"
+}
+
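+# Create a VPC (and subnet below) only when no existing name is supplied; the
+# data sources resolve to whichever network/subnet ends up in use.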
+resource "google_compute_network" "ssn_gke_vpc" {
+  count                   = var.vpc_name == "" ? 1 : 0
+  name                    = local.ssn_vpc_name
+  auto_create_subnetworks = false
+}
+
+data "google_compute_network" "ssn_gke_vpc_data" {
+  name = var.vpc_name == "" ? google_compute_network.ssn_gke_vpc.0.name : var.vpc_name
+}
+
+resource "google_compute_subnetwork" "ssn_gke_subnet" {
+  count         = var.subnet_name == "" ? 1 : 0
+  name          = local.ssn_subnet_name
+  ip_cidr_range = var.subnet_cidr
+  region        = var.region
+  network       = data.google_compute_network.ssn_gke_vpc_data.self_link
+}
+
+data "google_compute_subnetwork" "ssn_gke_subnet_data" {
+  name   = var.subnet_name == "" ? google_compute_subnetwork.ssn_gke_subnet.0.name : var.subnet_name
+  region = var.region
+}
+
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/Chart.yaml
index 16da950..039e6d0 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: cert-manager-crd
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..b5ada58
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cert-manager-crd.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cert-manager-crd.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cert-manager-crd.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "cert-manager-crd.labels" -}}
+app.kubernetes.io/name: {{ include "cert-manager-crd.name" . }}
+helm.sh/chart: {{ include "cert-manager-crd.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
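+
+{{- /*
+Usage sketch (illustrative only; no template in this change renders it):
+a chart template would typically attach the shared metadata as follows.
+
+  metadata:
+    name: {{ include "cert-manager-crd.fullname" . }}
+    labels:
+      {{- include "cert-manager-crd.labels" . | nindent 6 }}
+*/ -}}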
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/crd.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/crd.yaml
new file mode 100644
index 0000000..c2d6a4c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/templates/crd.yaml
@@ -0,0 +1,1449 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: certificates.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.conditions[?(@.type=="Ready")].status
+    name: Ready
+    type: string
+  - JSONPath: .spec.secretName
+    name: Secret
+    type: string
+  - JSONPath: .spec.issuerRef.name
+    name: Issuer
+    priority: 1
+    type: string
+  - JSONPath: .status.conditions[?(@.type=="Ready")].message
+    name: Status
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: Certificate
+    plural: certificates
+    shortNames:
+    - cert
+    - certs
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            acme:
+              description: ACME contains configuration specific to ACME Certificates.
+                Notably, this contains details on how the domain names listed on this
+                Certificate resource should be 'solved', i.e. mapping HTTP01 and DNS01
+                providers to DNS names.
+              properties:
+                config:
+                  items:
+                    properties:
+                      domains:
+                        description: Domains is the list of domains that this SolverConfig
+                          applies to.
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - domains
+                    type: object
+                  type: array
+              required:
+              - config
+              type: object
+            commonName:
+              description: CommonName is a common name to be used on the Certificate.
+                If no CommonName is given, then the first entry in DNSNames is used
+                as the CommonName. The CommonName should have a length of 64 characters
+                or fewer to avoid generating invalid CSRs; in order to have longer
+                domain names, set the CommonName (or first DNSNames entry) to have
+                64 characters or fewer, and then add the longer domain name to DNSNames.
+              type: string
+            dnsNames:
+              description: DNSNames is a list of subject alt names to be used on the
+                Certificate. If no CommonName is given, then the first entry in DNSNames
+                is used as the CommonName and must have a length of 64 characters
+                or fewer.
+              items:
+                type: string
+              type: array
+            duration:
+              description: The requested duration (lifetime) of the Certificate
+              type: string
+            ipAddresses:
+              description: IPAddresses is a list of IP addresses to be used on the
+                Certificate
+              items:
+                type: string
+              type: array
+            isCA:
+              description: IsCA will mark this Certificate as valid for signing. This
+                implies that the 'signing' usage is set
+              type: boolean
+            issuerRef:
+              description: IssuerRef is a reference to the issuer for this certificate.
+                If the 'kind' field is not set, or set to 'Issuer', an Issuer resource
+                with the given name in the same namespace as the Certificate will
+                be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer
+                with the provided name will be used. The 'name' field in this stanza
+                is required at all times.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+            keyAlgorithm:
+              description: KeyAlgorithm is the private key algorithm of the corresponding
+                private key for this certificate. If provided, allowed values are
+                either "rsa" or "ecdsa" If KeyAlgorithm is specified and KeySize is
+                not provided, key size of 256 will be used for "ecdsa" key algorithm
+                and key size of 2048 will be used for "rsa" key algorithm.
+              enum:
+              - rsa
+              - ecdsa
+              type: string
+            keyEncoding:
+              description: KeyEncoding is the private key cryptography standards (PKCS)
+                for this certificate's private key to be encoded in. If provided,
+                allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8,
+                respectively. If KeyEncoding is not specified, then PKCS#1 will be
+                used by default.
+              type: string
+            keySize:
+              description: KeySize is the key bit size of the corresponding private
+                key for this certificate. If provided, value must be between 2048
+                and 8192 inclusive when KeyAlgorithm is empty or is set to "rsa",
+                and value must be one of (256, 384, 521) when KeyAlgorithm is set
+                to "ecdsa".
+              format: int64
+              type: integer
+            organization:
+              description: Organization is the organization to be used on the Certificate
+              items:
+                type: string
+              type: array
+            renewBefore:
+              description: How long before expiry the Certificate should be renewed
+              type: string
+            secretName:
+              description: SecretName is the name of the secret resource in which the
+                issued certificate is stored
+              type: string
+          required:
+          - secretName
+          - issuerRef
+          type: object
+        status:
+          properties:
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+            lastFailureTime:
+              format: date-time
+              type: string
+            notAfter:
+              description: The expiration time of the certificate stored in the secret
+                named by this resource in spec.secretName.
+              format: date-time
+              type: string
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
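+# Illustrative only (commented out so the chart installs nothing extra): a
+# minimal Certificate conforming to the schema above; all names and the
+# issuer reference are assumptions.
+#
+# apiVersion: certmanager.k8s.io/v1alpha1
+# kind: Certificate
+# metadata:
+#   name: example-com-tls
+# spec:
+#   secretName: example-com-tls-secret
+#   dnsNames:
+#   - example.com
+#   issuerRef:
+#     name: letsencrypt-prod
+#     kind: ClusterIssuer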
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: certificaterequests.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.conditions[?(@.type=="Ready")].status
+    name: Ready
+    type: string
+  - JSONPath: .spec.issuerRef.name
+    name: Issuer
+    priority: 1
+    type: string
+  - JSONPath: .status.conditions[?(@.type=="Ready")].message
+    name: Status
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: CertificateRequest
+    plural: certificaterequests
+    shortNames:
+    - cr
+    - crs
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            csr:
+              description: Byte slice containing the PEM encoded CertificateSigningRequest
+              format: byte
+              type: string
+            duration:
+              description: The requested duration (lifetime) of the certificate
+              type: string
+            isCA:
+              description: IsCA will mark the resulting certificate as valid for signing.
+                This implies that the 'signing' usage is set
+              type: boolean
+            issuerRef:
+              description: IssuerRef is a reference to the issuer for this CertificateRequest.  If
+                the 'kind' field is not set, or set to 'Issuer', an Issuer resource
+                with the given name in the same namespace as the CertificateRequest
+                will be used.  If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer
+                with the provided name will be used. The 'name' field in this stanza
+                is required at all times. The group field refers to the API group
+                of the issuer which defaults to 'certmanager.k8s.io' if empty.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+          required:
+          - issuerRef
+          type: object
+        status:
+          properties:
+            ca:
+              description: Byte slice containing the PEM encoded certificate authority
+                of the signed certificate.
+              format: byte
+              type: string
+            certificate:
+              description: Byte slice containing a PEM encoded signed certificate
+                resulting from the given certificate signing request.
+              format: byte
+              type: string
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: challenges.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.state
+    name: State
+    type: string
+  - JSONPath: .spec.dnsName
+    name: Domain
+    type: string
+  - JSONPath: .status.reason
+    name: Reason
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: Challenge
+    plural: challenges
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            authzURL:
+              description: AuthzURL is the URL to the ACME Authorization resource
+                that this challenge is a part of.
+              type: string
+            config:
+              description: 'Config specifies the solver configuration for this challenge.
+                Only **one** of ''config'' or ''solver'' may be specified, and if
+                both are specified then no action will be performed on the Challenge
+                resource. DEPRECATED: the ''solver'' field should be specified instead'
+              type: object
+            dnsName:
+              description: DNSName is the identifier that this challenge is for, e.g.
+                example.com.
+              type: string
+            issuerRef:
+              description: IssuerRef references a properly configured ACME-type Issuer
+                which should be used to create this Challenge. If the Issuer does
+                not exist, processing will be retried. If the Issuer is not an 'ACME'
+                Issuer, an error will be returned and the Challenge will be marked
+                as failed.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+            key:
+              description: Key is the ACME challenge key for this challenge
+              type: string
+            solver:
+              description: Solver contains the domain solving configuration that should
+                be used to solve this challenge resource. Only **one** of 'config'
+                or 'solver' may be specified, and if both are specified then no action
+                will be performed on the Challenge resource.
+              properties:
+                selector:
+                  description: Selector selects a set of DNSNames on the Certificate
+                    resource that should be solved using this challenge solver.
+                  properties:
+                    dnsNames:
+                      description: List of DNSNames that this solver will be used
+                        to solve. If specified and a match is found, a dnsNames selector
+                        will take precedence over a dnsZones selector. If multiple
+                        solvers match with the same dnsNames value, the solver with
+                        the most matching labels in matchLabels will be selected.
+                        If neither has more matches, the solver defined earlier in
+                        the list will be selected.
+                      items:
+                        type: string
+                      type: array
+                    dnsZones:
+                      description: List of DNSZones that this solver will be used
+                        to solve. The most specific DNS zone match specified here
+                        will take precedence over other DNS zone matches, so a solver
+                        specifying sys.example.com will be selected over one specifying
+                        example.com for the domain www.sys.example.com. If multiple
+                        solvers match with the same dnsZones value, the solver with
+                        the most matching labels in matchLabels will be selected.
+                        If neither has more matches, the solver defined earlier in
+                        the list will be selected.
+                      items:
+                        type: string
+                      type: array
+                    matchLabels:
+                      description: A label selector that is used to refine the set
+                        of certificates that this challenge solver will apply to.
+                      type: object
+                  type: object
+              type: object
+            token:
+              description: Token is the ACME challenge token for this challenge.
+              type: string
+            type:
+              description: Type is the type of ACME challenge this resource represents,
+                e.g. "dns01" or "http01"
+              type: string
+            url:
+              description: URL is the URL of the ACME Challenge resource for this
+                challenge. This can be used to look up details about the status of
+                this challenge.
+              type: string
+            wildcard:
+              description: Wildcard will be true if this challenge is for a wildcard
+                identifier, for example '*.example.com'
+              type: boolean
+          required:
+          - authzURL
+          - type
+          - url
+          - dnsName
+          - token
+          - key
+          - wildcard
+          - issuerRef
+          type: object
+        status:
+          properties:
+            presented:
+              description: Presented will be set to true if the challenge values for
+                this challenge are currently 'presented'. This *does not* imply the
+                self check is passing, only that the values have been 'submitted'
+                for the appropriate challenge mechanism (i.e. the DNS01 TXT record
+                has been presented, or the HTTP01 configuration has been configured).
+              type: boolean
+            processing:
+              description: Processing is used to denote whether this challenge should
+                be processed or not. This field will only be set to true by the 'scheduling'
+                component. It will only be set to false by the 'challenges' controller,
+                after the challenge has reached a final state or timed out. If this
+                field is set to false, the challenge controller will not take any
+                more action.
+              type: boolean
+            reason:
+              description: Reason contains human readable information on why the Challenge
+                is in the current state.
+              type: string
+            state:
+              description: State contains the current 'state' of the challenge. If
+                not set, the state of the challenge is unknown.
+              enum:
+              - ""
+              - valid
+              - ready
+              - pending
+              - processing
+              - invalid
+              - expired
+              - errored
+              type: string
+          required:
+          - processing
+          - presented
+          - reason
+          type: object
+      required:
+      - metadata
+      - spec
+      - status
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: clusterissuers.certmanager.k8s.io
+spec:
+  group: certmanager.k8s.io
+  names:
+    kind: ClusterIssuer
+    plural: clusterissuers
+  scope: Cluster
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            acme:
+              properties:
+                email:
+                  description: Email is the email for this account
+                  type: string
+                privateKeySecretRef:
+                  description: PrivateKey is the name of a secret containing the private
+                    key for this user account.
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must be a
+                        valid secret key.
+                      type: string
+                    name:
+                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                        TODO: Add other useful fields. apiVersion, kind, uid?'
+                      type: string
+                  required:
+                  - name
+                  type: object
+                server:
+                  description: Server is the ACME server URL
+                  type: string
+                skipTLSVerify:
+                  description: If true, skip verifying the ACME server TLS certificate
+                  type: boolean
+                solvers:
+                  description: Solvers is a list of challenge solvers that will be
+                    used to solve ACME challenges for the matching domains.
+                  items:
+                    properties:
+                      selector:
+                        description: Selector selects a set of DNSNames on the Certificate
+                          resource that should be solved using this challenge solver.
+                        properties:
+                          dnsNames:
+                            description: List of DNSNames that this solver will be
+                              used to solve. If specified and a match is found, a
+                              dnsNames selector will take precedence over a dnsZones
+                              selector. If multiple solvers match with the same dnsNames
+                              value, the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          dnsZones:
+                            description: List of DNSZones that this solver will be
+                              used to solve. The most specific DNS zone match specified
+                              here will take precedence over other DNS zone matches,
+                              so a solver specifying sys.example.com will be selected
+                              over one specifying example.com for the domain www.sys.example.com.
+                              If multiple solvers match with the same dnsZones value,
+                              the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          matchLabels:
+                            description: A label selector that is used to refine the
+                              set of certificates that this challenge solver will
+                              apply to.
+                            type: object
+                        type: object
+                    type: object
+                  type: array
+              required:
+              - server
+              - privateKeySecretRef
+              type: object
+            ca:
+              properties:
+                secretName:
+                  description: SecretName is the name of the secret used to sign Certificates
+                    issued by this Issuer.
+                  type: string
+              required:
+              - secretName
+              type: object
+            selfSigned:
+              type: object
+            vault:
+              properties:
+                auth:
+                  description: Vault authentication
+                  properties:
+                    appRole:
+                      description: This Secret contains an AppRole and Secret
+                      properties:
+                        path:
+                          description: Where the authentication path is mounted in
+                            Vault.
+                          type: string
+                        roleId:
+                          type: string
+                        secretRef:
+                          properties:
+                            key:
+                              description: The key of the secret to select from. Must
+                                be a valid secret key.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                          required:
+                          - name
+                          type: object
+                      required:
+                      - path
+                      - roleId
+                      - secretRef
+                      type: object
+                    tokenSecretRef:
+                      description: This Secret contains the Vault token key
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                  type: object
+                caBundle:
+                  description: Base64 encoded CA bundle to validate Vault server certificate.
+                    Only used if the server URL uses the HTTPS protocol. This parameter
+                    is ignored for plain HTTP connections. If not set, the
+                    system root certificates are used to validate the TLS connection.
+                  format: byte
+                  type: string
+                path:
+                  description: Vault URL path to the certificate role
+                  type: string
+                server:
+                  description: Server is the vault connection address
+                  type: string
+              required:
+              - auth
+              - server
+              - path
+              type: object
+            venafi:
+              properties:
+                cloud:
+                  description: Cloud specifies the Venafi cloud configuration settings.
+                    Only one of TPP or Cloud may be specified.
+                  properties:
+                    apiTokenSecretRef:
+                      description: APITokenSecretRef is a secret key selector for
+                        the Venafi Cloud API token.
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for Venafi Cloud
+                      type: string
+                  required:
+                  - url
+                  - apiTokenSecretRef
+                  type: object
+                tpp:
+                  description: TPP specifies Trust Protection Platform configuration
+                    settings. Only one of TPP or Cloud may be specified.
+                  properties:
+                    caBundle:
+                      description: CABundle is a PEM encoded TLS certificate to use
+                        to verify connections to the TPP instance. If specified, system
+                        roots will not be used and the issuing CA for the TPP instance
+                        must be verifiable using the provided root. If not specified,
+                        the connection will be verified using the cert-manager system
+                        root certificates.
+                      format: byte
+                      type: string
+                    credentialsRef:
+                      description: CredentialsRef is a reference to a Secret containing
+                        the username and password for the TPP server. The secret must
+                        contain two keys, 'username' and 'password'.
+                      properties:
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for the Venafi TPP instance
+                      type: string
+                  required:
+                  - url
+                  - credentialsRef
+                  type: object
+                zone:
+                  description: Zone is the Venafi Policy Zone to use for this issuer.
+                    All requests made to the Venafi platform will be restricted by
+                    the named zone policy. This field is required.
+                  type: string
+              required:
+              - zone
+              type: object
+          type: object
+        status:
+          properties:
+            acme:
+              properties:
+                lastRegisteredEmail:
+                  description: LastRegisteredEmail is the email associated with the
+                    latest registered ACME account, in order to track changes made
+                    to the registered account associated with the Issuer
+                  type: string
+                uri:
+                  description: URI is the unique account identifier, which can also
+                    be used to retrieve account details from the CA
+                  type: string
+              type: object
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
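+# Illustrative only (commented out): the simplest ClusterIssuer this schema
+# admits, since 'selfSigned' has no required fields; the name is an assumption.
+#
+# apiVersion: certmanager.k8s.io/v1alpha1
+# kind: ClusterIssuer
+# metadata:
+#   name: selfsigned
+# spec:
+#   selfSigned: {}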
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: issuers.certmanager.k8s.io
+spec:
+  group: certmanager.k8s.io
+  names:
+    kind: Issuer
+    plural: issuers
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            acme:
+              properties:
+                email:
+                  description: Email is the email for this account
+                  type: string
+                privateKeySecretRef:
+                  description: PrivateKey is the name of a secret containing the private
+                    key for this user account.
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must be a
+                        valid secret key.
+                      type: string
+                    name:
+                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                        TODO: Add other useful fields. apiVersion, kind, uid?'
+                      type: string
+                  required:
+                  - name
+                  type: object
+                server:
+                  description: Server is the ACME server URL
+                  type: string
+                skipTLSVerify:
+                  description: If true, skip verifying the ACME server TLS certificate
+                  type: boolean
+                solvers:
+                  description: Solvers is a list of challenge solvers that will be
+                    used to solve ACME challenges for the matching domains.
+                  items:
+                    properties:
+                      selector:
+                        description: Selector selects a set of DNSNames on the Certificate
+                          resource that should be solved using this challenge solver.
+                        properties:
+                          dnsNames:
+                            description: List of DNSNames that this solver will be
+                              used to solve. If specified and a match is found, a
+                              dnsNames selector will take precedence over a dnsZones
+                              selector. If multiple solvers match with the same dnsNames
+                              value, the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          dnsZones:
+                            description: List of DNSZones that this solver will be
+                              used to solve. The most specific DNS zone match specified
+                              here will take precedence over other DNS zone matches,
+                              so a solver specifying sys.example.com will be selected
+                              over one specifying example.com for the domain www.sys.example.com.
+                              If multiple solvers match with the same dnsZones value,
+                              the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          matchLabels:
+                            description: A label selector that is used to refine the
+                              set of certificates that this challenge solver will
+                              apply to.
+                            type: object
+                        type: object
+                    type: object
+                  type: array
+              required:
+              - server
+              - privateKeySecretRef
+              type: object
+            ca:
+              properties:
+                secretName:
+                  description: SecretName is the name of the secret used to sign Certificates
+                    issued by this Issuer.
+                  type: string
+              required:
+              - secretName
+              type: object
+            selfSigned:
+              type: object
+            vault:
+              properties:
+                auth:
+                  description: Vault authentication
+                  properties:
+                    appRole:
+                      description: This Secret contains an AppRole and Secret
+                      properties:
+                        path:
+                          description: Where the authentication path is mounted in
+                            Vault.
+                          type: string
+                        roleId:
+                          type: string
+                        secretRef:
+                          properties:
+                            key:
+                              description: The key of the secret to select from. Must
+                                be a valid secret key.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                          required:
+                          - name
+                          type: object
+                      required:
+                      - path
+                      - roleId
+                      - secretRef
+                      type: object
+                    tokenSecretRef:
+                      description: This Secret contains the Vault token key
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                  type: object
+                caBundle:
+                  description: Base64 encoded CA bundle to validate Vault server certificate.
+                    Only used if the server URL uses the HTTPS protocol. This parameter
+                    is ignored for plain HTTP connections. If not set, the
+                    system root certificates are used to validate the TLS connection.
+                  format: byte
+                  type: string
+                path:
+                  description: Vault URL path to the certificate role
+                  type: string
+                server:
+                  description: Server is the vault connection address
+                  type: string
+              required:
+              - auth
+              - server
+              - path
+              type: object
+            venafi:
+              properties:
+                cloud:
+                  description: Cloud specifies the Venafi cloud configuration settings.
+                    Only one of TPP or Cloud may be specified.
+                  properties:
+                    apiTokenSecretRef:
+                      description: APITokenSecretRef is a secret key selector for
+                        the Venafi Cloud API token.
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for Venafi Cloud
+                      type: string
+                  required:
+                  - url
+                  - apiTokenSecretRef
+                  type: object
+                tpp:
+                  description: TPP specifies Trust Protection Platform configuration
+                    settings. Only one of TPP or Cloud may be specified.
+                  properties:
+                    caBundle:
+                      description: CABundle is a PEM encoded TLS certificate to use
+                        to verify connections to the TPP instance. If specified, system
+                        roots will not be used and the issuing CA for the TPP instance
+                        must be verifiable using the provided root. If not specified,
+                        the connection will be verified using the cert-manager system
+                        root certificates.
+                      format: byte
+                      type: string
+                    credentialsRef:
+                      description: CredentialsRef is a reference to a Secret containing
+                        the username and password for the TPP server. The secret must
+                        contain two keys, 'username' and 'password'.
+                      properties:
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    url:
+                      description: URL is the base URL for the Venafi TPP instance
+                      type: string
+                  required:
+                  - url
+                  - credentialsRef
+                  type: object
+                zone:
+                  description: Zone is the Venafi Policy Zone to use for this issuer.
+                    All requests made to the Venafi platform will be restricted by
+                    the named zone policy. This field is required.
+                  type: string
+              required:
+              - zone
+              type: object
+          type: object
+        status:
+          properties:
+            acme:
+              properties:
+                lastRegisteredEmail:
+                  description: LastRegisteredEmail is the email associated with the
+                    latest registered ACME account, in order to track changes made
+                    to the registered account associated with the Issuer
+                  type: string
+                uri:
+                  description: URI is the unique account identifier, which can also
+                    be used to retrieve account details from the CA
+                  type: string
+              type: object
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
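
For reference, a minimal Issuer conforming to the Venafi schema above might look like the sketch below; the issuer name, TPP URL, policy zone, and secret name are all hypothetical placeholders, and the referenced Secret must carry the 'username' and 'password' keys described under credentialsRef.

    apiVersion: certmanager.k8s.io/v1alpha1
    kind: Issuer
    metadata:
      name: venafi-tpp-issuer              # hypothetical name
    spec:
      venafi:
        zone: "DevOps\\Default"            # hypothetical TPP policy zone; required
        tpp:
          url: https://tpp.example.com/vedsdk    # hypothetical base URL; required
          credentialsRef:
            name: tpp-auth-secret          # Secret holding 'username' and 'password'
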
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: orders.certmanager.k8s.io
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.state
+    name: State
+    type: string
+  - JSONPath: .spec.issuerRef.name
+    name: Issuer
+    priority: 1
+    type: string
+  - JSONPath: .status.reason
+    name: Reason
+    priority: 1
+    type: string
+  - JSONPath: .metadata.creationTimestamp
+    description: CreationTimestamp is a timestamp representing the server time when
+      this object was created. It is not guaranteed to be set in happens-before order
+      across separate operations. Clients may not set this value. It is represented
+      in RFC3339 form and is in UTC.
+    name: Age
+    type: date
+  group: certmanager.k8s.io
+  names:
+    kind: Order
+    plural: orders
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            commonName:
+              description: CommonName is the common name as specified on the DER encoded
+                CSR. If CommonName is not specified, the first DNSName specified will
+                be used as the CommonName. At least one of CommonName or DNSNames
+                must be set. This field must match the corresponding field on the
+                DER encoded CSR.
+              type: string
+            config:
+              description: 'Config specifies a mapping from DNS identifiers to how
+                those identifiers should be solved when performing ACME challenges.
+                A config entry must exist for each domain listed in DNSNames and CommonName.
+                Only **one** of ''config'' or ''solvers'' may be specified, and if
+                both are specified then no action will be performed on the Order resource. This
+                field will be removed when support for solver config specified on
+                the Certificate under certificate.spec.acme has been removed. DEPRECATED:
+                this field will be removed in the future. Solver configuration must instead
+                be provided on ACME Issuer resources.'
+              items:
+                properties:
+                  domains:
+                    description: Domains is the list of domains that this SolverConfig
+                      applies to.
+                    items:
+                      type: string
+                    type: array
+                required:
+                - domains
+                type: object
+              type: array
+            csr:
+              description: Certificate signing request bytes in DER encoding. This
+                will be used when finalizing the order. This field must be set on
+                the order.
+              format: byte
+              type: string
+            dnsNames:
+              description: DNSNames is a list of DNS names that should be included
+                as part of the Order validation process. If CommonName is not specified,
+                the first DNSName specified will be used as the CommonName. At least
+                one of CommonName or DNSNames must be set. This field must match
+                the corresponding field on the DER encoded CSR.
+              items:
+                type: string
+              type: array
+            issuerRef:
+              description: IssuerRef references a properly configured ACME-type Issuer
+                which should be used to create this Order. If the Issuer does not
+                exist, processing will be retried. If the Issuer is not an 'ACME'
+                Issuer, an error will be returned and the Order will be marked as
+                failed.
+              properties:
+                group:
+                  type: string
+                kind:
+                  type: string
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+          required:
+          - csr
+          - issuerRef
+          type: object
+        status:
+          properties:
+            certificate:
+              description: Certificate is a copy of the PEM encoded certificate for
+                this Order. This field will be populated after the order has been
+                successfully finalized with the ACME server, and the order has transitioned
+                to the 'valid' state.
+              format: byte
+              type: string
+            challenges:
+              description: Challenges is a list of ChallengeSpecs for Challenges that
+                must be created in order to complete this Order.
+              items:
+                properties:
+                  authzURL:
+                    description: AuthzURL is the URL to the ACME Authorization resource
+                      that this challenge is a part of.
+                    type: string
+                  config:
+                    description: 'Config specifies the solver configuration for this
+                      challenge. Only **one** of ''config'' or ''solver'' may be specified,
+                      and if both are specified then no action will be performed on
+                      the Challenge resource. DEPRECATED: the ''solver'' field should
+                      be specified instead'
+                    type: object
+                  dnsName:
+                    description: DNSName is the identifier that this challenge is
+                      for, e.g. example.com.
+                    type: string
+                  issuerRef:
+                    description: IssuerRef references a properly configured ACME-type
+                      Issuer which should be used to create this Challenge. If the
+                      Issuer does not exist, processing will be retried. If the Issuer
+                      is not an 'ACME' Issuer, an error will be returned and the Challenge
+                      will be marked as failed.
+                    properties:
+                      group:
+                        type: string
+                      kind:
+                        type: string
+                      name:
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  key:
+                    description: Key is the ACME challenge key for this challenge
+                    type: string
+                  solver:
+                    description: Solver contains the domain solving configuration
+                      that should be used to solve this challenge resource. Only **one**
+                      of 'config' or 'solver' may be specified, and if both are specified
+                      then no action will be performed on the Challenge resource.
+                    properties:
+                      selector:
+                        description: Selector selects a set of DNSNames on the Certificate
+                          resource that should be solved using this challenge solver.
+                        properties:
+                          dnsNames:
+                            description: List of DNSNames that this solver will be
+                              used to solve. If specified and a match is found, a
+                              dnsNames selector will take precedence over a dnsZones
+                              selector. If multiple solvers match with the same dnsNames
+                              value, the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          dnsZones:
+                            description: List of DNSZones that this solver will be
+                              used to solve. The most specific DNS zone match specified
+                              here will take precedence over other DNS zone matches,
+                              so a solver specifying sys.example.com will be selected
+                              over one specifying example.com for the domain www.sys.example.com.
+                              If multiple solvers match with the same dnsZones value,
+                              the solver with the most matching labels in matchLabels
+                              will be selected. If neither has more matches, the solver
+                              defined earlier in the list will be selected.
+                            items:
+                              type: string
+                            type: array
+                          matchLabels:
+                            description: A label selector that is used to refine the
+                              set of certificates that this challenge solver will
+                              apply to.
+                            type: object
+                        type: object
+                    type: object
+                  token:
+                    description: Token is the ACME challenge token for this challenge.
+                    type: string
+                  type:
+                    description: Type is the type of ACME challenge this resource
+                      represents, e.g. "dns01" or "http01"
+                    type: string
+                  url:
+                    description: URL is the URL of the ACME Challenge resource for
+                      this challenge. This can be used to lookup details about the
+                      status of this challenge.
+                    type: string
+                  wildcard:
+                    description: Wildcard will be true if this challenge is for a
+                      wildcard identifier, for example '*.example.com'
+                    type: boolean
+                required:
+                - authzURL
+                - type
+                - url
+                - dnsName
+                - token
+                - key
+                - wildcard
+                - issuerRef
+                type: object
+              type: array
+            failureTime:
+              description: FailureTime stores the time that this order failed. This
+                is used to influence garbage collection and back-off.
+              format: date-time
+              type: string
+            finalizeURL:
+              description: FinalizeURL of the Order. This is used to obtain certificates
+                for this order once it has been completed.
+              type: string
+            reason:
+              description: Reason optionally provides more information about why
+                the order is in the current state.
+              type: string
+            state:
+              description: State contains the current state of this Order resource.
+                States 'valid' and 'expired' are 'final'.
+              enum:
+              - ""
+              - valid
+              - ready
+              - pending
+              - processing
+              - invalid
+              - expired
+              - errored
+              type: string
+            url:
+              description: URL of the Order. This will initially be empty when the
+                resource is first created. The Order controller will populate this
+                field when the Order is first processed. This field will be immutable
+                after it is initially set.
+              type: string
+          type: object
+      required:
+      - metadata
+      - spec
+      - status
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
\ No newline at end of file
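
Order resources are normally created and driven by the ACME controller rather than authored by hand; the sketch below only illustrates the two required spec fields from the schema above (csr and issuerRef), with placeholder values.

    apiVersion: certmanager.k8s.io/v1alpha1
    kind: Order
    metadata:
      name: example-order                  # placeholder name
    spec:
      csr: <base64-encoded DER CSR bytes>  # required; format: byte
      issuerRef:                           # required; must reference an ACME issuer
        name: letsencrypt-staging          # hypothetical Issuer name
        kind: Issuer
      dnsNames:
        - www.example.com

Given the additionalPrinterColumns above, "kubectl get orders" shows the State column, while the priority-1 Issuer and Reason columns appear with "-o wide".
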
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/values.yaml
similarity index 77%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/values.yaml
index d0cfc24..0c6d2cf 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager-crd-chart/values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,10 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+replicaCount: 1
+
+ingress:
+  enabled: false
+labels: {}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager.tf
new file mode 100644
index 0000000..8330b84
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/cert-manager.tf
@@ -0,0 +1,64 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+resource "null_resource" "crd_delay" {
+    provisioner "local-exec" {
+        command = "sleep 120"
+    }
+}
+
+data "template_file" "cert_manager_values" {
+    template = file("./modules/helm_charts/files/cert_manager_values.yaml")
+}
+
+resource "helm_release" "cert_manager_crd" {
+    name       = "cert_manager_crd"
+    chart      = "./modules/helm_charts/cert-manager-crd-chart"
+    wait       = true
+    depends_on = [helm_release.nginx]
+}
+
+data "helm_repository" "jetstack" {
+    name = "jetstack"
+    url  = "https://charts.jetstack.io"
+}
+
+resource "helm_release" "cert-manager" {
+    name       = "cert-manager"
+    repository = data.helm_repository.jetstack.metadata.0.name
+    chart      = "jetstack/cert-manager"
+    namespace  = kubernetes_namespace.cert-manager-namespace.metadata[0].name
+    depends_on = [helm_release.cert_manager_crd]
+    wait       = true
+    version    = "v0.9.0"
+    values     = [
+        data.template_file.cert_manager_values.rendered
+    ]
+}
+
+resource "null_resource" "cert_manager_delay" {
+    provisioner "local-exec" {
+        command = "sleep 120"
+    }
+    triggers = {
+        "after" = helm_release.cert-manager.name
+    }
+}
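
The two null_resource sleeps implement coarse sequencing: they simply wait a fixed 120 seconds so that cert-manager's CRDs and webhook have time to become ready before dependent releases install. Downstream charts opt into this ordering by depending on the delay resource, as dlab-billing.tf does below; a hypothetical consumer would look like the following sketch.

    resource "helm_release" "example_consumer" {       # hypothetical release
      name       = "example"
      chart      = "./modules/helm_charts/example-chart"
      namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
      depends_on = [null_resource.cert_manager_delay]  # run only after cert-manager settles
    }
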
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-billing.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-billing.tf
new file mode 100644
index 0000000..b2c6716
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-billing.tf
@@ -0,0 +1,44 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "dlab_billing_values" {
+  template = file("./modules/helm_charts/dlab-billing-chart/values.yaml")
+  vars = {
+    mongo_db_name           = var.mongo_dbname
+    mongo_user              = var.mongo_db_username
+    mongo_port              = var.mongo_service_port
+    mongo_service_name      = var.mongo_service_name
+    service_base_name       = var.service_base_name
+    big_query_dataset       = var.big_query_dataset
+  }
+}
+
+resource "helm_release" "dlab-billing" {
+    name       = "dlab-billing"
+    chart      = "./modules/helm_charts/dlab-billing-chart"
+    depends_on = [helm_release.mongodb, kubernetes_secret.mongo_db_password_secret, null_resource.cert_manager_delay]
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    wait       = true
+
+    values     = [
+        data.template_file.dlab_billing_values.rendered
+    ]
+}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/cert.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/cert.yaml
new file mode 100644
index 0000000..5762e9a
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/cert.yaml
@@ -0,0 +1,64 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+{{- if not .Values.ui.custom_certs.enabled -}}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+  name: dlab-ui
+  namespace: {{ .Values.namespace }}
+spec:
+  # The secret name to store the signed certificate
+  secretName: {{ include "dlab-ui.fullname" . }}-tls
+  # Common Name
+  commonName: dlab-kubernetes-cluster
+  # DNS SAN
+  dnsNames:
+    - localhost
+    - {{ .Values.ui.ingress.host }}
+  # IP Address SAN
+  ipAddresses:
+    - "127.0.0.1"
+  # Duration of the certificate
+  duration: 24h
+  # Renew 8 hours before the certificate expiration
+  renewBefore: 8h
+  # The reference to the step issuer
+  issuerRef:
+    group: certmanager.step.sm
+    kind: Issuer
+    name: step-issuer
+{{- end }}
+---
+{{- if .Values.ui.custom_certs.enabled -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "dlab-ui.fullname" . }}-tls
+  namespace: {{ .Values.namespace }}
+type: kubernetes.io/tls
+data:
+  ca.crt: {{ .Values.ui.custom_certs.ca }}
+  tls.crt: {{ .Values.ui.custom_certs.crt }}
+  tls.key: {{ .Values.ui.custom_certs.key }}
+{{- end }}
\ No newline at end of file
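
This template takes one of two mutually exclusive paths: with custom certificates disabled, a cert-manager Certificate asks the step issuer for a short-lived certificate (24h duration, renewed 8h before expiry); with them enabled, a plain kubernetes.io/tls Secret is created from user-supplied material instead. Because the data fields of a Secret must be base64-encoded, the chart expects the values already encoded, matching the base64encode(file(...)) calls in dlab-ui.tf later in this change. A sketch of the override, with placeholder values:

    ui:
      custom_certs:
        enabled: true
        crt: <base64-encoded server certificate PEM>
        key: <base64-encoded private key PEM>
        ca: <base64-encoded CA certificate PEM>
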
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/configmap-ui-conf.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/configmap-ui-conf.yaml
new file mode 100644
index 0000000..ac96e8b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/configmap-ui-conf.yaml
@@ -0,0 +1,235 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "dlab-ui.fullname" . }}-ui-conf
+data:
+  ssn.yml: |
+    <#assign LOG_ROOT_DIR="/var/opt/dlab/log">
+    <#assign KEYS_DIR="/root/keys">
+    <#assign KEY_STORE_PATH="/root/keys/ssn.keystore.jks">
+    <#assign KEY_STORE_PASSWORD="${SSN_KEYSTORE_PASSWORD}">
+    <#assign TRUST_STORE_PATH="/usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts">
+    <#assign TRUST_STORE_PASSWORD="changeit">
+
+    # Available options are aws, azure, gcp
+    <#assign CLOUD_TYPE="gcp">
+    cloudProvider: ${CLOUD_TYPE}
+
+    # Switch developer mode on/off here
+    <#assign DEV_MODE="false">
+    devMode: ${DEV_MODE}
+
+    mongo:
+      host: {{ .Values.ui.mongo.host }}
+      port: {{ .Values.ui.mongo.port }}
+      username: {{ .Values.ui.mongo.username }}
+      password: ${MONGO_DB_PASSWORD}
+      database: {{ .Values.ui.mongo.db_name }}
+
+    selfService:
+      protocol: https
+      host: localhost
+      port: {{ .Values.ui.service.https_port }}
+      jerseyClient:
+        timeout: 3s
+        connectionTimeout: 3s
+
+    securityService:
+      protocol: https
+      host: localhost
+      port: 8090
+      jerseyClient:
+        timeout: 20s
+        connectionTimeout: 20s
+
+    provisioningService:
+      jerseyClient:
+        timeout: 3s
+        connectionTimeout: 3s
+
+    # Log out user on inactivity
+    inactiveUserTimeoutMillSec: 7200000
+
+  self-service.yml: |
+    <#include "/root/ssn.yml">
+
+    <#if CLOUD_TYPE == "aws">
+    # Minimum and maximum number of slave EMR instances that could be created
+    minEmrInstanceCount: 2
+    maxEmrInstanceCount: 14
+    # Minimum and maximum bid percentage for slave EMR spot instances
+    minEmrSpotInstanceBidPct: 20
+    maxEmrSpotInstanceBidPct: 90
+    </#if>
+
+    <#if CLOUD_TYPE == "gcp">
+    # Maximum length for gcp user name (due to gcp restrictions)
+    maxUserNameLength: 10
+    # Minimum and maximum number of slave Dataproc instances that could be created
+    minInstanceCount: 3
+    maxInstanceCount: 15
+    minDataprocPreemptibleCount: 0
+    gcpOuauth2AuthenticationEnabled: false
+    </#if>
+
+    # Boundaries for Spark cluster creation
+    minSparkInstanceCount: 2
+    maxSparkInstanceCount: 14
+
+    # Timeout for checking the environment status via the provisioning service
+    checkEnvStatusTimeout: 5m
+
+    # Restrict access to DLab features using roles policy
+    rolePolicyEnabled: true
+    # Default access to DLab features using roles policy
+    roleDefaultAccess: true
+
+    # Set to true to enable the billing report scheduler.
+    billingSchedulerEnabled: true
+    # Name of the configuration file for the billing report.
+    <#if DEV_MODE == "true">
+    billingConfFile: ${sys['user.dir']}/../billing/billing.yml
+    <#else>
+    billingConfFile: ${DLAB_CONF_DIR}/billing.yml
+    </#if>
+
+    <#if CLOUD_TYPE == "azure">
+    azureUseLdap: <LOGIN_USE_LDAP>
+    maxSessionDurabilityMilliseconds: 288000000
+    </#if>
+
+    serviceBaseName: {{ .Values.ui.service_base_name }}
+    os: {{ .Values.ui.os }}
+    server:
+      requestLog:
+        appenders:
+        - type: file
+          currentLogFilename: ${LOG_ROOT_DIR}/ssn/request-selfservice.log
+          archive: true
+          archivedLogFilenamePattern: ${LOG_ROOT_DIR}/ssn/request-selfservice-%d{yyyy-MM-dd}.log.gz
+          archivedFileCount: 10
+      rootPath: "/api"
+      applicationConnectors:
+      - type: http
+        port: {{ .Values.ui.service.http_port }}
+      - type: https
+        port: {{ .Values.ui.service.https_port }}
+        certAlias: ssn
+        validateCerts: false
+        keyStorePath: ${KEY_STORE_PATH}
+        keyStorePassword: ${KEY_STORE_PASSWORD}
+        trustStorePath: ${TRUST_STORE_PATH}
+        trustStorePassword: ${TRUST_STORE_PASSWORD}
+      adminConnectors:
+    #    - type: http
+    #      port: 8081
+      - type: https
+        port: 8444
+        certAlias: ssn
+        validateCerts: false
+        keyStorePath: ${KEY_STORE_PATH}
+        keyStorePassword: ${KEY_STORE_PASSWORD}
+        trustStorePath: ${TRUST_STORE_PATH}
+        trustStorePassword: ${TRUST_STORE_PASSWORD}
+
+    mongoMigrationEnabled: false
+
+    logging:
+      level: INFO
+      loggers:
+        com.epam: TRACE
+        com.novemberain: ERROR
+      appenders:
+      - type: console
+      - type: file
+        currentLogFilename: ${LOG_ROOT_DIR}/ssn/selfservice.log
+        archive: true
+        archivedLogFilenamePattern: ${LOG_ROOT_DIR}/ssn/selfservice-%d{yyyy-MM-dd}.log.gz
+        archivedFileCount: 10
+
+    mavenSearchService:
+      protocol: http
+      host: search.maven.org
+      port: 80
+      jerseyClient:
+        timeout: 5s
+        connectionTimeout: 5s
+
+    schedulers:
+      inactivity:
+        enabled: false
+        cron: "0 0 0/2 ? * * *"
+      startComputationalScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      stopComputationalScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      startExploratoryScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      stopExploratoryScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      terminateComputationalScheduler:
+        enabled: true
+        cron: "*/20 * * ? * * *"
+      checkQuoteScheduler:
+        enabled: true
+        cron: "0 0 * ? * * *"
+      checkUserQuoteScheduler:
+        enabled: false
+        cron: "0 0 * ? * * *"
+      checkProjectQuoteScheduler:
+        enabled: true
+        cron: "0 * * ? * * *"
+
+
+    guacamole:
+      connectionProtocol: ssh
+      serverPort: 4822
+      port: 22
+      username: dlab-user
+
+    keycloakConfiguration:
+      redirectUri: {{ .Values.ui.keycloak.redirect_uri }}
+      realm: {{ .Values.ui.keycloak.realm_name }}
+      bearer-only: true
+      auth-server-url: ${KEYCLOAK_AUTH_URL}
+      ssl-required: none
+      register-node-at-startup: true
+      register-node-period: 600
+      resource: {{ .Values.ui.keycloak.client_id }}
+      credentials:
+        secret: ${KEYCLOAK_CLIENT_SECRET}
+
+    jerseyClient:
+      minThreads: 1
+      maxThreads: 128
+      workQueueSize: 8
+      gzipEnabled: true
+      gzipEnabledForRequests: false
+      chunkedEncodingEnabled: true
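
Three templating layers coexist in this ConfigMap: Helm's {{ ... }} expressions are rendered at install time, the <#...> directives are FreeMarker and are evaluated when the self-service application loads its configuration, and placeholders such as ${MONGO_DB_PASSWORD}, ${SSN_KEYSTORE_PASSWORD}, ${KEYCLOAK_CLIENT_SECRET} and ${KEYCLOAK_AUTH_URL} appear to resolve from the environment variables injected by the Deployment that follows. The scheduler entries use what appear to be Quartz-style seven-field cron expressions, for example:

    # fields: seconds minutes hours day-of-month month day-of-week [year]
    cron: "*/20 * * ? * * *"   # fires every 20 seconds; '?' means no specific day-of-month
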
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/deployment.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/deployment.yaml
new file mode 100644
index 0000000..03c469e
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/deployment.yaml
@@ -0,0 +1,107 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "dlab-ui.fullname" . }}
+  labels:
+{{ include "dlab-ui.labels" . | indent 4 }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "dlab-ui.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ include "dlab-ui.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+    spec:
+      containers:
+        - name: {{ .Chart.Name }}
+          image: "{{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag }}"
+          imagePullPolicy: {{ .Values.ui.image.pullPolicy }}
+          env:
+            - name: MONGO_DB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongo-db-password
+                  key: password
+            - name: SSN_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: ssn-keystore-password
+                  key: password
+            - name: KEYCLOAK_CLIENT_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: keycloak-client-secret
+                  key: client_secret
+            - name: KEYCLOAK_AUTH_URL
+              value: {{ .Values.ui.keycloak.auth_server_url }}
+          ports:
+            - name: http
+              containerPort: 80
+              protocol: TCP
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - name: ui-conf
+              mountPath: /root/ssn.yml
+              subPath: ssn
+              readOnly: true
+            - name: ui-conf
+              mountPath: /root/self-service.yml
+              subPath: self-service
+              readOnly: true
+            - mountPath: "/root/step-certs"
+              name: ui-tls
+              readOnly: true
+      volumes:
+        - name: ui-conf
+          configMap:
+            name: {{ include "dlab-ui.fullname" . }}-ui-conf
+            defaultMode: 0644
+            items:
+              - key: ssn.yml
+                path: ssn
+              - key: self-service.yml
+                path: self-service
+        - name: ui-tls
+          secret:
+            secretName: {{ include "dlab-ui.fullname" . }}-tls
+
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+    {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+    {{- end }}
+    {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+    {{- end }}
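
The container reads the rendered ssn.yml and self-service.yml from the ConfigMap via subPath mounts and pulls its credentials from three Secret objects (mongo-db-password, ssn-keystore-password, keycloak-client-secret) that must already exist in the namespace. The real mongo secret (kubernetes_secret.mongo_db_password_secret, referenced in depends_on lists elsewhere in this change) is defined in another file of this module; the sketch below only shows the shape this Deployment assumes, with a hypothetical input variable.

    resource "kubernetes_secret" "mongo_db_password_secret" {
      metadata {
        name      = "mongo-db-password"         # matches the secretKeyRef above
        namespace = kubernetes_namespace.dlab-namespace.metadata[0].name
      }
      data = {
        password = var.mongo_db_password        # assumed variable name
      }
    }
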
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/ingress.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/ingress.yaml
new file mode 100644
index 0000000..d53fb5e
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/templates/ingress.yaml
@@ -0,0 +1,57 @@
+{{- /*
+# ******************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+{{- if .Values.ui.ingress.enabled -}}
+{{- $fullName := include "dlab-ui.fullname" . -}}
+{{ $servicePort := .Values.ui.service.http_port }}
+{{ $host := .Values.ui.ingress.host }}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+{{ include "dlab-ui.labels" . | indent 4 }}
+  annotations:
+{{- with .Values.ui.ingress.annotations }}
+{{ toYaml . | indent 4 }}
+  {{- end }}
+spec:
+{{- if .Values.ui.ingress.tls }}
+  tls:
+  {{- range .Values.ui.ingress.tls }}
+    - hosts:
+      {{- range .hosts }}
+        - {{ . | quote }}
+      {{- end }}
+      secretName: {{ .secretName }}
+  {{- end }}
+{{- end }}
+  rules:
+    - host: {{ $host }}
+      http:
+        paths:
+        - backend:
+            serviceName: {{ $fullName }}
+            servicePort: {{ $servicePort }}
+          path: /
+{{- end }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/values.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/values.yaml
new file mode 100644
index 0000000..4f11f1b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui-chart/values.yaml
@@ -0,0 +1,66 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+# Default values for dlab-ui.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+labels: {}
+namespace: ${namespace}
+
+ui:
+  service_base_name: ${service_base_name}
+  os: ${os}
+  image:
+    repository: epamdlab/ui
+    tag: '0.1-gcp'
+    pullPolicy: Always
+  service:
+    type: ClusterIP
+    #  port: 58443
+    http_port: 80
+    https_port: 443
+  ingress:
+    enabled: true
+    host: ${ssn_k8s_alb_dns_name}
+    annotations:
+      kubernetes.io/ingress.class: nginx
+      nginx.ingress.kubernetes.io/ssl-redirect: "true"
+      nginx.ingress.kubernetes.io/proxy-body-size: "50m"
+    tls:
+      - secretName: dlab-ui-tls
+  mongo:
+    host: ${mongo_service_name}
+    port: ${mongo_port}
+    username: ${mongo_user}
+    db_name: ${mongo_db_name}
+  keycloak:
+    auth_server_url: https://${ssn_k8s_alb_dns_name}/auth
+    redirect_uri: https://${ssn_k8s_alb_dns_name}/
+    realm_name: ${keycloak_realm_name}
+    client_id: ${keycloak_client_id}
+
+  custom_certs:
+    enabled: ${custom_certs_enabled}
+    crt: ${custom_certs_crt}
+    key: ${custom_certs_key}
+    ca: ${step_ca_crt}
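
Every ${...} token in this values file is a Terraform template placeholder, not Helm templating: dlab-ui.tf (the next file) renders it through template_file and passes the result to helm_release via its values argument. For example, assuming service_base_name = "dlab" and domain = "example.com" with custom certificates disabled, the rendered ingress host would come out as:

    ingress:
      enabled: true
      host: dlab-ssn.example.com   # from "${service_base_name}-ssn.${domain}" in the dlab-ui.tf locals
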
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui.tf
new file mode 100644
index 0000000..b258a87
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/dlab-ui.tf
@@ -0,0 +1,70 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+    custom_certs_enabled = lower(var.custom_certs_enabled)
+    custom_cert_name     = local.custom_certs_enabled == "true" ? reverse(split("/", var.custom_cert_path))[0] : "None"
+    custom_key_name      = local.custom_certs_enabled == "true" ? reverse(split("/", var.custom_key_path))[0] : "None"
+    custom_cert          = local.custom_certs_enabled == "true" ? base64encode(file("/tmp/${local.custom_cert_name}")) : "None"
+    custom_key           = local.custom_certs_enabled == "true" ? base64encode(file("/tmp/${local.custom_key_name}")) : "None"
+    ui_host              = local.custom_certs_enabled == "true" ? var.custom_certs_host : "${var.service_base_name}-ssn.${var.domain}"
+}
+
+data "template_file" "dlab_ui_values" {
+  template = file("./modules/helm_charts/dlab-ui-chart/values.yaml")
+  vars     = {
+      mongo_db_name          = var.mongo_dbname
+      mongo_user             = var.mongo_db_username
+      mongo_port             = var.mongo_service_port
+      mongo_service_name     = var.mongo_service_name
+      ssn_k8s_alb_dns_name   = local.ui_host
+      service_base_name      = var.service_base_name
+      os                     = var.env_os
+      namespace              = kubernetes_namespace.dlab-namespace.metadata[0].name
+      custom_certs_enabled   = local.custom_certs_enabled
+      custom_certs_crt       = local.custom_cert
+      custom_certs_key       = local.custom_key
+      step_ca_crt            = lookup(data.external.step-ca-config-values.result, "rootCa")
+      keycloak_realm_name    = var.keycloak_realm_name
+      keycloak_client_id     = var.keycloak_client_id
+  }
+}
+
+resource "helm_release" "dlab_ui" {
+    name       = "dlab-ui"
+    chart      = "./modules/helm_charts/dlab-ui-chart"
+    depends_on = [helm_release.mongodb, kubernetes_secret.mongo_db_password_secret, null_resource.step_ca_issuer_delay,
+                  helm_release.external_dns]
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    wait       = true
+
+    values     = [
+        data.template_file.dlab_ui_values.rendered
+    ]
+}
+
+data "kubernetes_service" "ui_service" {
+    metadata {
+        name       = helm_release.dlab_ui.name
+        namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    }
+    depends_on = [helm_release.dlab_ui]
+}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns.tf
new file mode 100644
index 0000000..3a00b4f
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns.tf
@@ -0,0 +1,40 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "external_dns_values" {
+    template = file("./modules/helm_charts/external-dns/values.yaml")
+    vars     = {
+        namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+        project_id = var.project_id
+        domain     = var.domain
+    }
+}
+
+resource "helm_release" "external_dns" {
+    name       = "external-dns"
+    chart      = "./modules/helm_charts/external-dns"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    wait       = true
+    depends_on = [helm_release.nginx]
+    values     = [
+        data.template_file.external_dns_values.rendered
+    ]
+}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/Chart.yaml
index 16da950..89fe41a 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: external-dns
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/_helpers.tpl b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/_helpers.tpl
new file mode 100644
index 0000000..91e2a65
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "external-dns.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "external-dns.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "external-dns.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "external-dns.labels" -}}
+app.kubernetes.io/name: {{ include "external-dns.name" . }}
+helm.sh/chart: {{ include "external-dns.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/externaldns.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/externaldns.yaml
new file mode 100644
index 0000000..a52bb2e
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/templates/externaldns.yaml
@@ -0,0 +1,82 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "external-dns.fullname" . }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: {{ include "external-dns.fullname" . }}
+rules:
+- apiGroups: [""]
+  resources: ["services"]
+  verbs: ["get","watch","list"]
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get","watch","list"]
+- apiGroups: ["extensions"]
+  resources: ["ingresses"]
+  verbs: ["get","watch","list"]
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "external-dns.fullname" . }}-viewer
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "external-dns.fullname" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "external-dns.fullname" . }}
+  namespace: {{ .Values.namespace }}
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ include "external-dns.fullname" . }}
+spec:
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: {{ include "external-dns.fullname" . }}
+    spec:
+      serviceAccountName: {{ include "external-dns.fullname" . }}
+      containers:
+      - name: {{ include "external-dns.fullname" . }}
+        image: registry.opensource.zalan.do/teapot/external-dns:latest
+        args:
+        - --source=ingress
+        - --domain-filter={{ .Values.domain }}
+        - --provider=google
+        - --google-project={{ .Values.project_id }}
+        - --registry=txt
+        - --txt-owner-id=dlab-kubernetes-cluster
\ No newline at end of file
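
As configured above, external-dns watches Ingress resources (--source=ingress), manages only records under the configured domain (--domain-filter), and tracks ownership through TXT records (--registry=txt) so that it never modifies records it did not create. With the owner id given here, the ownership record written alongside each managed DNS entry looks roughly like:

    "heritage=external-dns,external-dns/owner=dlab-kubernetes-cluster"
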
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/values.yaml
similarity index 75%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/values.yaml
index d0cfc24..5ed77b1 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/external-dns/values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,9 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+replicaCount: 1
+namespace: ${namespace}
+domain: ${domain}
+project_id: ${project_id}
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/cert_manager_values.yaml
similarity index 76%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/cert_manager_values.yaml
index d0cfc24..688e91f 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/cert_manager_values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,7 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+extraArgs:
+  - --feature-gates=CertificateRequestControllers=true
\ No newline at end of file
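
This flag enables cert-manager v0.9's CertificateRequest controllers, which are gated off by default in that release; the step issuer referenced by the dlab-ui cert.yaml template earlier in this change is an external issuer and presumably relies on the CertificateRequest flow, which is why the gate is switched on here.
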
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/configure_keycloak.sh b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/configure_keycloak.sh
new file mode 100644
index 0000000..309c37c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/configure_keycloak.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+      # *****************************************************************************
+      #
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements.  See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership.  The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License.  You may obtain a copy of the License at
+      #
+      #   http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing,
+      # software distributed under the License is distributed on an
+      # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+      # KIND, either express or implied.  See the License for the
+      # specific language governing permissions and limitations
+      # under the License.
+      #
+      # ******************************************************************************
+
+      # The 6-space indent is required because this file is pasted into keycloak_values.yaml by Terraform
+      set -x
+      auth () {
+          RUN=$(/opt/jboss/keycloak/bin/kcadm.sh config credentials --server http://127.0.0.1:8080/auth --realm master \
+          --user ${keycloak_user} --password ${keycloak_password} > /dev/null && echo "true" || echo "false")
+      }
+      check_realm () {
+          RUN=$(/opt/jboss/keycloak/bin/kcadm.sh get realms/${keycloak_realm_name} > /dev/null && echo "true" || echo "false")
+      }
+      configure_keycloak () {
+          # Create Realm
+          /opt/jboss/keycloak/bin/kcadm.sh create realms -s realm=${keycloak_realm_name} -s enabled=true -s loginTheme=dlab \
+          -s sslRequired=none
+          # Get realm ID
+          dlab_realm_id=$(/opt/jboss/keycloak/bin/kcadm.sh get realms/${keycloak_realm_name} | /usr/bin/jq -r '.id')
+          # Create user federation
+          /opt/jboss/keycloak/bin/kcadm.sh create components -r ${keycloak_realm_name} -s name=dlab-ldap -s providerId=ldap \
+          -s providerType=org.keycloak.storage.UserStorageProvider -s parentId=$dlab_realm_id  -s 'config.priority=["1"]' \
+          -s 'config.fullSyncPeriod=["-1"]' -s 'config.changedSyncPeriod=["-1"]' -s 'config.cachePolicy=["DEFAULT"]' \
+          -s config.evictionDay=[] -s config.evictionHour=[] -s config.evictionMinute=[] -s config.maxLifespan=[] -s \
+          'config.batchSizeForSync=["1000"]' -s 'config.editMode=["READ_ONLY"]' -s 'config.syncRegistrations=["false"]' \
+          -s 'config.vendor=["other"]' -s 'config.usernameLDAPAttribute=["${ldap_usernameAttr}"]' \
+          -s 'config.rdnLDAPAttribute=["${ldap_rdnAttr}"]' -s 'config.uuidLDAPAttribute=["${ldap_uuidAttr}"]' \
+          -s 'config.userObjectClasses=["inetOrgPerson, organizationalPerson"]' \
+          -s 'config.connectionUrl=["ldap://${ldap_host}:389"]'  -s 'config.usersDn=["${ldap_users_group},${ldap_dn}"]' \
+          -s 'config.authType=["simple"]' -s 'config.bindDn=["${ldap_user},${ldap_dn}"]' \
+          -s 'config.bindCredential=["${ldap_bind_creds}"]' -s 'config.searchScope=["1"]' \
+          -s 'config.useTruststoreSpi=["ldapsOnly"]' -s 'config.connectionPooling=["true"]' \
+          -s 'config.pagination=["true"]' --server http://127.0.0.1:8080/auth
+          # Get user federation ID
+          user_f_id=$(/opt/jboss/keycloak/bin/kcadm.sh get components -r ${keycloak_realm_name} --query name=dlab-ldap | /usr/bin/jq -er '.[].id')
+          # Create user federation email mapper
+          /opt/jboss/keycloak/bin/kcadm.sh create components -r ${keycloak_realm_name} -s name=uid-attribute-to-email-mapper \
+          -s providerId=user-attribute-ldap-mapper -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper \
+          -s parentId=$user_f_id -s 'config."user.model.attribute"=["email"]' \
+          -s 'config."ldap.attribute"=["uid"]' -s 'config."read.only"=["false"]' \
+          -s 'config."always.read.value.from.ldap"=["false"]' -s 'config."is.mandatory.in.ldap"=["false"]'
+          # Create user federation group mapper
+          /opt/jboss/keycloak/bin/kcadm.sh create components -r ${keycloak_realm_name} -s name=group_mapper -s providerId=group-ldap-mapper \
+          -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper -s parentId=$user_f_id \
+          -s 'config."groups.dn"=["ou=Groups,${ldap_dn}"]' -s 'config."group.name.ldap.attribute"=["cn"]' \
+          -s 'config."group.object.classes"=["posixGroup"]' -s 'config."preserve.group.inheritance"=["false"]' \
+          -s 'config."membership.ldap.attribute"=["memberUid"]' -s 'config."membership.attribute.type"=["UID"]' \
+          -s 'config."groups.ldap.filter"=[]' -s 'config.mode=["IMPORT"]' \
+          -s 'config."user.roles.retrieve.strategy"=["LOAD_GROUPS_BY_MEMBER_ATTRIBUTE"]' \
+          -s 'config."mapped.group.attributes"=[]' -s 'config."drop.non.existing.groups.during.sync"=["false"]'
+          # Create client
+          /opt/jboss/keycloak/bin/kcadm.sh create clients -r ${keycloak_realm_name} -s clientId=${keycloak_client_id} -s enabled=true -s \
+          'redirectUris=["https://${ssn_k8s_alb_dns_name}/"]' -s secret=${keycloak_client_secret} -s \
+          serviceAccountsEnabled=true
+          # Get client ID
+          client_id=$(/opt/jboss/keycloak/bin/kcadm.sh get clients -r ${keycloak_realm_name} --query clientId=${keycloak_client_id} | /usr/bin/jq -er '.[].id')
+          # Create client mapper
+          /opt/jboss/keycloak/bin/kcadm.sh create clients/$client_id/protocol-mappers/models \
+          -r ${keycloak_realm_name} -s name=group_mapper -s protocol=openid-connect -s protocolMapper="oidc-group-membership-mapper" \
+          -s 'config."full.path"="false"' -s 'config."id.token.claim"="true"' -s 'config."access.token.claim"="true"' \
+          -s 'config."claim.name"="groups"' -s 'config."userinfo.token.claim"="true"'
+      }
+      main_func () {
+          hostname=$(hostname)
+          # Authentication
+          count=0
+          if [[ $hostname != "keycloak-0" ]];
+          then
+            echo "Skipping startup script!"
+            exit 0
+          fi
+          while auth
+          do
+          if [[ $RUN == "false" ]] && (( $count < 120 ));
+          then
+              echo "Waiting for Keycloak..."
+              sleep 5
+              count=$((count + 1))
+          elif [[ $RUN == "true" ]];
+          then
+              echo "Authenticated!"
+              break
+          else
+              echo "Timeout error!"
+              exit 1
+          fi
+          done
+          # Check if the realm already exists
+          check_realm
+          # Create the realm if it does not exist
+          if [[ $RUN == "false" ]];
+          then
+              configure_keycloak
+          else
+              echo "Realm is already exist!"
+          fi
+      }
+      main_func &
\ No newline at end of file
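
Since the script backgrounds itself and exits on every pod except keycloak-0, the quickest completion check is to repeat its own kcadm.sh calls by hand. A minimal sketch — <keycloak_user>, <keycloak_password>, and <realm> stand in for the rendered Terraform variables, and the dlab namespace matches the other scripts in this patch:

    # Authenticate the admin CLI inside the pod, then confirm the realm exists.
    kubectl -n dlab exec keycloak-0 -- \
      /opt/jboss/keycloak/bin/kcadm.sh config credentials \
      --server http://127.0.0.1:8080/auth --realm master \
      --user '<keycloak_user>' --password '<keycloak_password>'
    kubectl -n dlab exec keycloak-0 -- \
      /opt/jboss/keycloak/bin/kcadm.sh get realms/<realm>
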
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/get_configmap_values.sh b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/get_configmap_values.sh
new file mode 100644
index 0000000..3085eb7
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/get_configmap_values.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+creds_file_path=$1
+gke_name=$2
+region=$3
+project_id=$4
+
+gcloud auth activate-service-account --key-file "$creds_file_path"
+export KUBECONFIG=/tmp/config; gcloud beta container clusters get-credentials "$gke_name" --region "$region" --project "$project_id"
+ROOT_CA=$(kubectl get -o jsonpath="{.data['root_ca\.crt']}" configmaps/step-certificates-certs -ndlab | base64 | tr -d '\n')
+KID=$(kubectl get -o jsonpath="{.data['ca\.json']}" configmaps/step-certificates-config -ndlab | jq -r .authority.provisioners[].key.kid)
+KID_NAME=$(kubectl get -o jsonpath="{.data['ca\.json']}" configmaps/step-certificates-config -ndlab | jq -r .authority.provisioners[].name)
+jq -n --arg rootCa "$ROOT_CA" --arg kid "$KID" --arg kidName "$KID_NAME" '{rootCa: $rootCa, kid: $kid, kidName: $kidName}'
+unset KUBECONFIG
+rm /tmp/config
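
The script is shaped for Terraform's external data source protocol: four positional arguments in, one JSON object on stdout. A hedged usage sketch with placeholder arguments:

    # Args: service-account key file, GKE cluster name, region, project id.
    ./get_configmap_values.sh /path/to/key.json <gke-cluster> <region> <project-id> \
      | jq -r '.rootCa, .kid, .kidName'
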
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/keycloak_values.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/keycloak_values.yaml
new file mode 100644
index 0000000..569e4e7
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/keycloak_values.yaml
@@ -0,0 +1,80 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+keycloak:
+  image:
+    tag: "7.0.0"
+  replicas: 1
+  basepath: auth
+  username: ${keycloak_user}
+  password: "${keycloak_password}"
+
+  persistence:
+    dbVendor: mysql
+    dbName: ${mysql_db_name}
+    dbHost: keycloak-mysql
+    dbPort: 3306
+    dbUser: ${mysql_user}
+    dbPassword: "${mysql_user_password}"
+
+  service:
+    type: ClusterIP
+    # nodePort: 31088
+
+  ingress:
+    enabled: true
+    annotations:
+      kubernetes.io/ingress.class: nginx
+      nginx.ingress.kubernetes.io/ssl-redirect: "true"
+      nginx.ingress.kubernetes.io/rewrite-target: /auth
+    path: /auth
+    hosts:
+      - ${ssn_k8s_alb_dns_name}
+    tls:
+      - hosts:
+          - ${ssn_k8s_alb_dns_name}
+        secretName: dlab-ui-tls
+
+  startupScripts:
+    mystartup.sh: |
+      ${configure_keycloak_file}
+
+  extraInitContainers: |
+    - name: theme-provider
+      image: epamdlab/ui-theme:0.1
+      imagePullPolicy: Always
+      command:
+        - sh
+      args:
+        - -c
+        - |
+          echo "Copying theme..."
+          cp -R /dlab/* /theme
+      volumeMounts:
+        - name: theme
+          mountPath: /theme
+  extraVolumeMounts: |
+    - name: theme
+      mountPath: /opt/jboss/keycloak/themes/dlab
+
+  extraVolumes: |
+    - name: theme
+      emptyDir: {}
\ No newline at end of file
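
The ${configure_keycloak_file} interpolation is where the 6-space-indented startup script shown above is spliced in, which is what keeps the rendered file valid YAML. To sanity-check a rendered values file before the Terraform apply, the chart can be templated offline; a sketch assuming the codecentric repo is registered locally and the rendered values were written to a hypothetical path:

    # Fetch the chart and render it offline against the generated values.
    helm fetch codecentric/keycloak --untar
    helm template ./keycloak -f /tmp/keycloak_values.rendered.yaml | head -n 40
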
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mongo_values.yaml
similarity index 75%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mongo_values.yaml
index 16da950..e4bdfb8 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mongo_values.yaml
@@ -19,19 +19,20 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+replicaSet:
+  enabled: true
 
+mongodbRootPassword: "${mongo_root_pwd}"
+mongodbUsername: ${mongo_db_username}
+mongodbDatabase: ${mongo_dbname}
+mongodbPassword: "${mongo_db_pwd}"
 
-USER root
+image:
+  tag: ${mongo_image_tag}
 
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
+persistence:
+  enabled: false
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+service:
+  type: ClusterIP
+  port: ${mongo_service_port}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mysql_values.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mysql_values.yaml
index 16da950..c9a82bc 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/mysql_values.yaml
@@ -19,19 +19,12 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+mysqlRootPassword: "${mysql_root_password}"
+mysqlUser: ${mysql_user}
+mysqlPassword: "${mysql_user_password}"
+mysqlDatabase: ${mysql_db_name}
+imageTag: "5.7.14"
+persistence:
+  enabled: true
+  size: ${mysql_disk_size}Gi
+  storageClass: ${storage_class}
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/nginx_values.yaml
similarity index 78%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/nginx_values.yaml
index d0cfc24..a484a42 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/files/nginx_values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,8 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+controller:
+  service:
+    type: LoadBalancer
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/keycloak.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/keycloak.tf
new file mode 100644
index 0000000..ebd6d11
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/keycloak.tf
@@ -0,0 +1,74 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "configure_keycloak" {
+  template = file("./modules/helm_charts/files/configure_keycloak.sh")
+  vars     = {
+    ssn_k8s_alb_dns_name   = local.ui_host
+    keycloak_user          = var.keycloak_user
+    keycloak_password      = random_string.keycloak_password.result
+    keycloak_client_secret = random_uuid.keycloak_client_secret.result
+    ldap_usernameAttr      = var.ldap_usernameAttr
+    ldap_rdnAttr           = var.ldap_rdnAttr
+    ldap_uuidAttr          = var.ldap_uuidAttr
+    ldap_host              = var.ldap_host
+    ldap_users_group       = var.ldap_users_group
+    ldap_dn                = var.ldap_dn
+    ldap_user              = var.ldap_user
+    ldap_bind_creds        = var.ldap_bind_creds
+    keycloak_realm_name    = var.keycloak_realm_name
+    keycloak_client_id     = var.keycloak_client_id
+  }
+}
+
+data "template_file" "keycloak_values" {
+  template = file("./modules/helm_charts/files/keycloak_values.yaml")
+  vars     = {
+    keycloak_user           = var.keycloak_user
+    keycloak_password       = random_string.keycloak_password.result
+    ssn_k8s_alb_dns_name    = local.ui_host
+    configure_keycloak_file = data.template_file.configure_keycloak.rendered
+    mysql_db_name           = var.mysql_db_name
+    mysql_user              = var.mysql_user
+    mysql_user_password     = random_string.mysql_keycloak_user_password.result
+    # replicas_count          = var.ssn_k8s_workers_count > 3 ? 3 : var.ssn_k8s_workers_count
+  }
+}
+
+data "helm_repository" "codecentric" {
+    name = "codecentric"
+    url  = "https://codecentric.github.io/helm-charts"
+}
+
+resource "helm_release" "keycloak" {
+  name       = "keycloak"
+  repository = data.helm_repository.codecentric.metadata.0.name
+  chart      = "codecentric/keycloak"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  wait       = true
+  timeout    = 600
+
+  values     = [
+    data.template_file.keycloak_values.rendered
+  ]
+  depends_on = [helm_release.keycloak-mysql, kubernetes_secret.keycloak_password_secret, helm_release.nginx,
+                helm_release.dlab_ui]
+}
\ No newline at end of file
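
After the apply, the release and its generated admin credential can be checked directly; a minimal sketch using the Helm 2 CLI and the keycloak-password secret defined in secrets.tf, assuming the dlab namespace:

    # Inspect the release, then recover the generated Keycloak admin password.
    helm status keycloak
    kubectl -n dlab get secret keycloak-password \
      -o jsonpath='{.data.password}' | base64 --decode
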
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/main.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/main.tf
new file mode 100644
index 0000000..300e729
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/main.tf
@@ -0,0 +1,96 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "google_container_cluster" "ssn_k8s_gke_cluster" {
+  name       = var.gke_cluster_name
+  location   = var.region
+  depends_on = []
+}
+
+data "google_client_config" "current" {}
+
+provider "helm" {
+
+  kubernetes {
+    host                   = data.google_container_cluster.ssn_k8s_gke_cluster.endpoint
+    token                  = data.google_client_config.current.access_token
+    client_certificate     = base64decode(data.google_container_cluster.ssn_k8s_gke_cluster.master_auth.0.client_certificate)
+    client_key             = base64decode(data.google_container_cluster.ssn_k8s_gke_cluster.master_auth.0.client_key)
+    cluster_ca_certificate = base64decode(data.google_container_cluster.ssn_k8s_gke_cluster.master_auth.0.cluster_ca_certificate)
+  }
+  install_tiller = true
+  service_account = kubernetes_service_account.tiller_sa.metadata.0.name
+}
+
+provider "kubernetes" {
+  host = data.google_container_cluster.ssn_k8s_gke_cluster.endpoint
+
+  client_certificate     = base64decode(data.google_container_cluster.ssn_k8s_gke_cluster.master_auth.0.client_certificate)
+  client_key             = base64decode(data.google_container_cluster.ssn_k8s_gke_cluster.master_auth.0.client_key)
+  cluster_ca_certificate = base64decode(data.google_container_cluster.ssn_k8s_gke_cluster.master_auth.0.cluster_ca_certificate)
+}
+
+resource "kubernetes_service_account" "tiller_sa" {
+  metadata {
+    name = "tiller"
+    namespace = "kube-system"
+  }
+}
+
+resource "kubernetes_role_binding" "tiller_rb" {
+  metadata {
+    name      = "tiller"
+    namespace = "kube-system"
+  }
+  role_ref {
+    api_group = "rbac.authorization.k8s.io"
+    kind      = "ClusterRole"
+    name      = "cluster-admin"
+  }
+  subject {
+    kind      = "ServiceAccount"
+    name      = "tiller"
+    namespace = "kube-system"
+  }
+}
+
+resource "kubernetes_namespace" "dlab-namespace" {
+  metadata {
+    annotations = {
+      name = var.namespace_name
+    }
+
+    name = var.namespace_name
+  }
+}
+
+resource "kubernetes_namespace" "cert-manager-namespace" {
+  metadata {
+    annotations = {
+      name = "cert-manager"
+    }
+    labels = {
+      "certmanager.k8s.io/disable-validation" = "true"
+    }
+
+    name = "cert-manager"
+  }
+}
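
Because install_tiller is enabled, the provider deploys Tiller under the service account bound above; if releases later hang, verifying that pairing is a sensible first check. A sketch — tiller-deploy is the conventional deployment name, not one set explicitly here:

    # Confirm Tiller is running and its service account exists.
    kubectl -n kube-system get deploy tiller-deploy
    kubectl -n kube-system get serviceaccount tiller
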
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mongo.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mongo.tf
new file mode 100644
index 0000000..f8a020b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mongo.tf
@@ -0,0 +1,54 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "mongo_values" {
+  template = file("./modules/helm_charts/files/mongo_values.yaml")
+  vars     = {
+      mongo_root_pwd      = random_string.mongo_root_password.result
+      mongo_db_username   = var.mongo_db_username
+      mongo_dbname        = var.mongo_dbname
+      mongo_db_pwd        = random_string.mongo_db_password.result
+      mongo_image_tag     = var.mongo_image_tag
+      mongo_service_port  = var.mongo_service_port
+      mongo_node_port     = var.mongo_node_port
+      load_balancer_ip    = data.kubernetes_service.nginx_service.load_balancer_ingress.0.ip
+  }
+}
+
+resource "helm_release" "mongodb" {
+    name       = "mongo-ha"
+    chart      = "stable/mongodb"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    wait       = true
+    values     = [
+        data.template_file.mongo_values.rendered
+    ]
+    depends_on = [helm_release.nginx, kubernetes_secret.mongo_db_password_secret,
+                  kubernetes_secret.mongo_root_password_secret]
+}
+
+data "kubernetes_service" "mongo_service" {
+    metadata {
+        name       = "${helm_release.mongodb.name}-mongodb"
+        namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    }
+    depends_on = [helm_release.mongodb]
+}
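
The stable/mongodb chart names its service <release>-mongodb, which is why the data source above resolves mongo-ha-mongodb. A quick sketch to confirm the in-cluster endpoint, assuming the dlab namespace:

    # The ClusterIP and port shown here are what the self-service components use.
    kubectl -n dlab get svc mongo-ha-mongodb
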
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mysql.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mysql.tf
new file mode 100644
index 0000000..e6afb3f
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/mysql.tf
@@ -0,0 +1,45 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "mysql_values" {
+  template = file("./modules/helm_charts/files/mysql_values.yaml")
+  vars     = {
+    mysql_root_password = random_string.mysql_root_password.result
+    mysql_user          = var.mysql_user
+    mysql_user_password = random_string.mysql_keycloak_user_password.result
+    mysql_db_name       = var.mysql_db_name
+    storage_class       = "standard"
+    mysql_disk_size     = var.mysql_disk_size
+  }
+}
+
+resource "helm_release" "keycloak-mysql" {
+  name       = "keycloak-mysql"
+  chart      = "stable/mysql"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  wait       = true
+  values     = [
+    data.template_file.mysql_values.rendered
+  ]
+  depends_on = [kubernetes_secret.mysql_root_password_secret, kubernetes_secret.mysql_keycloak_user_password_secret,
+                helm_release.nginx]
+}
+
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/Chart.yaml
index 16da950..ef4a57c 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: nginx-default-backend
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/_helpers.tpl b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/_helpers.tpl
new file mode 100644
index 0000000..cb92b6c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "nginx-default-backend.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "nginx-default-backend.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "nginx-default-backend.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "nginx-default-backend.labels" -}}
+app.kubernetes.io/name: {{ include "nginx-default-backend.name" . }}
+helm.sh/chart: {{ include "nginx-default-backend.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/nginx-default-backend.yaml
similarity index 66%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/nginx-default-backend.yaml
index 951fdd7..9a4f3be 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/templates/nginx-default-backend.yaml
@@ -1,3 +1,4 @@
+{{- /*
 # *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -18,23 +19,31 @@
 # under the License.
 #
 # ******************************************************************************
+*/ -}}
 
-FROM openjdk:8-alpine
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-default-backend
+spec:
+  ports:
+  - port: 80
+    targetPort: 8080
+  selector:
+    app: nginx-default-backend
 
+---
 
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nginx-default-backend
+spec:
+  template:
+    metadata:
+      labels:
+        app: nginx-default-backend
+    spec:
+      containers:
+      - name: nginx-default-backend
+        image: gcr.io/google_containers/defaultbackend:1.3
\ No newline at end of file
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/values.yaml
similarity index 80%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/values.yaml
index d0cfc24..b2591c4 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-default-backend/values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,7 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+replicaCount: 1
+
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/Chart.yaml
index 16da950..0255ae4 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: nginx-ingress
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/_helpers.tpl b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/_helpers.tpl
new file mode 100644
index 0000000..90474ff
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "nginx-ingress.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "nginx-ingress.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "nginx-ingress.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "nginx-ingress.labels" -}}
+app.kubernetes.io/name: {{ include "nginx-ingress.name" . }}
+helm.sh/chart: {{ include "nginx-ingress.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/nginx-ingress.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/nginx-ingress.yaml
new file mode 100644
index 0000000..f8acfe0
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/templates/nginx-ingress.yaml
@@ -0,0 +1,70 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-ingress
+spec:
+  type: LoadBalancer
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+  - name: https
+    port: 443
+    targetPort: 443
+  selector:
+    app: nginx-ingress
+
+---
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nginx-ingress
+spec:
+  template:
+    metadata:
+      labels:
+        app: nginx-ingress
+    spec:
+      containers:
+      - name: nginx-ingress
+        image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.3
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=dlab/nginx-default-backend
+        - --publish-service=dlab/nginx-ingress
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+        ports:
+        - containerPort: 80
+        - containerPort: 443
\ No newline at end of file
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/values.yaml
similarity index 80%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/values.yaml
index d0cfc24..b2591c4 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx-ingress/values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,7 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+replicaCount: 1
+
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx.tf
new file mode 100644
index 0000000..e03bb97
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/nginx.tf
@@ -0,0 +1,58 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "nginx-default-backend" {
+  template = file("./modules/helm_charts/nginx-default-backend/values.yaml")
+}
+
+resource "helm_release" "nginx-default-backend" {
+    name       = "nginx-default-backend"
+    chart      = "./modules/helm_charts/nginx-default-backend"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    wait       = true
+    depends_on = [null_resource.crd_delay]
+    values     = [
+        data.template_file.nginx-default-backend.rendered
+    ]
+}
+
+data "template_file" "nginx-ingress" {
+  template = file("./modules/helm_charts/nginx-ingress/values.yaml")
+}
+
+resource "helm_release" "nginx" {
+    name       = "nginx-ingress"
+    chart      = "./modules/helm_charts/nginx-ingress"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    wait       = true
+    depends_on = [helm_release.nginx-default-backend]
+    values     = [
+        data.template_file.nginx-ingress.rendered
+    ]
+}
+
+data "kubernetes_service" "nginx_service" {
+    metadata {
+        name       = helm_release.nginx.name
+        namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+    }
+    depends_on     = [helm_release.nginx]
+}
\ No newline at end of file
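
Other parts of this module (the mongo values and the step-ca outputs) read the LoadBalancer IP from the nginx_service data source, so the apply effectively waits for the cloud provider to assign one. A hedged way to watch for it, assuming the dlab namespace:

    # Prints the external IP once the load balancer is provisioned.
    kubectl -n dlab get svc nginx-ingress \
      -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
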
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/outputs.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/outputs.tf
new file mode 100644
index 0000000..154527d
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/outputs.tf
@@ -0,0 +1,53 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+output "keycloak_client_secret" {
+    value = random_uuid.keycloak_client_secret.result
+}
+
+output "keycloak_client_id" {
+    value = "dlab-ui"
+}
+
+output "keycloak_user_password" {
+    value = random_string.keycloak_password.result
+}
+
+output "ssn_ui_host" {
+    value = local.ui_host
+}
+
+output "step_root_ca" {
+    value = lookup(data.external.step-ca-config-values.result, "rootCa")
+}
+
+output "step_kid" {
+    value = lookup(data.external.step-ca-config-values.result, "kid")
+}
+
+output "step_kid_password" {
+    value = random_string.step_ca_provisioner_password.result
+}
+
+output "step_ca_url" {
+    value = "https://${kubernetes_service.step_service_lb.load_balancer_ingress.0.ip}"
+}
+
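
These outputs are the values downstream endpoint provisioning consumes (client secret, UI host, step-ca coordinates). They can be read back after the apply; a minimal sketch run from the root configuration directory:

    # Print the module outputs, including the generated secrets, as JSON.
    terraform output -json
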
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/secrets.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/secrets.tf
new file mode 100644
index 0000000..5a78c41
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/secrets.tf
@@ -0,0 +1,162 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+resource "random_uuid" "keycloak_client_secret" {}
+
+resource "random_string" "ssn_keystore_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "keycloak_client_secret" {
+  metadata {
+    name       = "keycloak-client-secret"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    client_secret = random_uuid.keycloak_client_secret.result
+  }
+}
+
+resource "random_string" "keycloak_password" {
+  length = 16
+  special = false
+}
+
+
+resource "kubernetes_secret" "keycloak_password_secret" {
+  metadata {
+    name       = "keycloak-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.keycloak_password.result
+  }
+}
+
+resource "random_string" "mongo_root_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mongo_root_password_secret" {
+  metadata {
+    name       = "mongo-root-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mongo_root_password.result
+  }
+}
+
+resource "random_string" "mongo_db_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mongo_db_password_secret" {
+  metadata {
+    name       = "mongo-db-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mongo_db_password.result
+  }
+}
+
+resource "random_string" "mysql_root_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mysql_root_password_secret" {
+  metadata {
+    name       = "mysql-root-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mysql_root_password.result
+  }
+}
+
+resource "random_string" "mysql_keycloak_user_password" {
+  length = 16
+  special = false
+}
+
+resource "kubernetes_secret" "mysql_keycloak_user_password_secret" {
+  metadata {
+    name       = "mysql-keycloak-user-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.mysql_keycloak_user_password.result
+  }
+}
+
+resource "kubernetes_secret" "ssn_keystore_password" {
+  metadata {
+    name       = "ssn-keystore-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.ssn_keystore_password.result
+  }
+}
+
+resource "random_string" "step_ca_password" {
+  length = 8
+  special = false
+}
+
+resource "kubernetes_secret" "step_ca_password_secret" {
+  metadata {
+    name       = "step-ca-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.step_ca_password.result
+  }
+}
+
+resource "random_string" "step_ca_provisioner_password" {
+  length = 8
+  special = false
+}
+
+resource "kubernetes_secret" "step_ca_provisioner_password_secret" {
+  metadata {
+    name       = "step-ca-provisioner-password"
+    namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+
+  data = {
+    password = random_string.step_ca_provisioner_password.result
+  }
+}
\ No newline at end of file
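
Each generated credential is mirrored into a Kubernetes secret, so none of them has to be dug out of Terraform state later. The same decoding pattern works for any of the secrets above; a sketch assuming the dlab namespace:

    # Decode one of the generated passwords (swap the secret name as needed).
    kubectl -n dlab get secret mongo-root-password \
      -o jsonpath='{.data.password}' | base64 --decode
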
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/.helmignore
index 951fdd7..2f795d4 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/Chart.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/Chart.yaml
new file mode 100644
index 0000000..e9d93e2
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/Chart.yaml
@@ -0,0 +1,52 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+apiVersion: v1
+appVersion: 0.13.2
+description: An online certificate authority and related tools for secure automated
+  certificate management, so you can use TLS everywhere.
+engine: gotpl
+home: https://smallstep.com
+icon: https://raw.githubusercontent.com/smallstep/certificates/master/icon.png
+keywords:
+- acme
+- authority
+- ca
+- certificate
+- certificates
+- certificate-authority
+- kubernetes
+- pki
+- security
+- security-tools
+- smallstep
+- ssh
+- step
+- step-ca
+- tls
+- x509
+maintainers:
+- email: mariano@smallstep.com
+  name: Mariano Cano
+name: step-certificates
+sources:
+- https://github.com/smallstep/certificates
+version: 1.13.2
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/NOTES.txt b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/NOTES.txt
new file mode 100644
index 0000000..43f6544
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/NOTES.txt
@@ -0,0 +1,13 @@
+
+Thanks for installing Step CA.
+
+1. Get the PKI and Provisioner secrets by running these commands:
+   kubectl get -n {{ .Release.Namespace }} -o jsonpath='{.data.password}' secret/{{ include "step-certificates.fullname" . }}-ca-password | base64 --decode
+   kubectl get -n {{ .Release.Namespace }} -o jsonpath='{.data.password}' secret/{{ include "step-certificates.fullname" . }}-provisioner-password | base64 --decode
+{{ if .Release.IsInstall }}
+2. Get the CA URL and the root certificate fingerprint by running this command:
+   kubectl -n {{ .Release.Namespace }} logs job.batch/{{ .Release.Name }}
+
+3. Delete the configuration job by running this command:
+   kubectl -n {{ .Release.Namespace }} delete job.batch/{{ .Release.Name }}
+{{ end -}}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..b65f748
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/_helpers.tpl
@@ -0,0 +1,88 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "step-certificates.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "step-certificates.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "step-certificates.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "step-certificates.labels" -}}
+helm.sh/chart: {{ include "step-certificates.chart" . }}
+app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Create CA URL
+*/}}
+{{- define "step-certificates.url" -}}
+{{- if .Values.ca.url -}}
+{{- .Values.ca.url -}}
+{{- else -}}
+{{- printf "https://%s.%s.svc.cluster.local" (include "step-certificates.fullname" .) .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create CA DNS
+*/}}
+{{- define "step-certificates.dns" -}}
+{{- if .Values.ca.dns -}}
+{{- .Values.ca.dns -}}
+{{- else -}}
+{{- printf "%s.%s.svc.cluster.local,127.0.0.1" (include "step-certificates.fullname" .) .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
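
The url and dns helpers above only synthesize the in-cluster address when .Values.ca.url and .Values.ca.dns are left empty; otherwise the user-supplied values win. To see what they actually render to, the chart can be templated locally (a sketch using Helm 3 syntax; the release name and chart path are assumptions):

    # Render the chart and inspect the computed CA URL/DNS defaults.
    helm template step-certificates ./step-ca-chart | grep 'svc.cluster.local'
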
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/bootstrap.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/bootstrap.yaml
new file mode 100644
index 0000000..354c144
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/bootstrap.yaml
@@ -0,0 +1,60 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{- if .Release.IsInstall -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: "{{.Release.Name}}"
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+spec:
+  template:
+    metadata:
+      name: "{{.Release.Name}}"
+      labels:
+        app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+    spec:
+      serviceAccountName: {{ include "step-certificates.fullname" . }}-config
+      restartPolicy: Never
+      volumes:
+        - name: bootstrap
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-bootstrap
+      containers:
+        - name: config
+          image: "{{ .Values.bootstrapImage.repository }}:{{ .Values.bootstrapImage.tag }}"
+          imagePullPolicy: {{ .Values.bootstrapImage.pullPolicy }}
+          command: ["/bin/sh", "/home/step/bootstrap/bootstrap.sh"]
+          volumeMounts:
+            - name: bootstrap
+              mountPath: /home/step/bootstrap
+              readOnly: true
+{{- end -}}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/ca.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/ca.yaml
new file mode 100644
index 0000000..2551cc5
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/ca.yaml
@@ -0,0 +1,159 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "step-certificates.fullname" . }}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+spec:
+  # Only one replica is supported at this moment
+  # Requested {{ .Values.replicaCount }}
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+  serviceName: {{ include "step-certificates.fullname" . }}
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+        app: {{ include "step-certificates.fullname" . }}
+    spec:
+{{- if .Release.IsInstall }}
+      initContainers:
+        - name: {{ .Chart.Name }}-init
+          image: busybox:latest
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          command: ["sleep", "20"]
+{{- end }}
+      securityContext:
+        {{- if .Values.ca.runAsRoot }}
+        runAsUser: 0
+        {{- else }}
+        runAsUser: 1000
+        runAsNonRoot: true
+        runAsGroup: 1000
+        fsGroup: 1000
+        allowPrivilegeEscalation: false
+        {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          command: ["/usr/local/bin/step-ca",
+                    "--password-file", "/home/step/secrets/passwords/password",
+                    "/home/step/config/ca.json"]
+          env:
+            - name: NAMESPACE
+              value: "{{ .Release.Namespace }}"
+          ports:
+            - name: https
+              containerPort: {{ .Values.service.targetPort }}
+              protocol: TCP
+          livenessProbe:
+            initialDelaySeconds: 5
+            httpGet:
+              path: /health
+              port: {{ .Values.service.targetPort }}
+              scheme: HTTPS
+          readinessProbe:
+            initialDelaySeconds: 5
+            httpGet:
+              path: /health
+              port: {{ .Values.service.targetPort }}
+              scheme: HTTPS
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - name: certs
+              mountPath: /home/step/certs
+              readOnly: true
+            - name: config
+              mountPath: /home/step/config
+              readOnly: true
+            - name: secrets
+              mountPath: /home/step/secrets
+              readOnly: true
+            - name: ca-password
+              mountPath: /home/step/secrets/passwords
+              readOnly: true
+            {{- if .Values.ca.db.enabled }}
+            - name: database
+              mountPath: /home/step/db
+              readOnly: false
+            {{- end }}
+      volumes:
+        - name: certs
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-certs
+        - name: config
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-config
+        - name: secrets
+          configMap:
+            name: {{ include "step-certificates.fullname" . }}-secrets
+        - name: ca-password
+          secret:
+            secretName: {{ include "step-certificates.fullname" . }}-ca-password
+        {{- if and .Values.ca.db.enabled (not .Values.ca.db.persistent) }}
+        - name: database
+          emptyDir: {}
+        {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- if and .Values.ca.db.enabled .Values.ca.db.persistent }}
+  volumeClaimTemplates:
+    - metadata:
+        name: database
+        labels:
+          app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+          app.kubernetes.io/instance: {{ .Release.Name }}
+          app.kubernetes.io/managed-by: {{ .Release.Service }}
+      spec:
+        accessModes:
+        {{- range .Values.ca.db.accessModes }}
+          - {{ . | quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.ca.db.size | quote }}
+        {{- if .Values.ca.db.storageClass }}
+        {{- if (eq "-" .Values.ca.db.storageClass) }}
+        storageClassName: ""
+        {{- else }}
+        storageClassName: {{ .Values.ca.db.storageClass | quote }}
+        {{- end }}
+        {{- end }}
+{{- end }}
\ No newline at end of file
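
Deeply nested pod templates like the StatefulSet above are where Helm indentation bugs usually hide, so it is worth rendering and dry-running the chart before Terraform ever touches it (a sketch; Helm 3 syntax, kubectl >= 1.18 for --dry-run=client, and the chart path are assumptions):

    # Render the manifests and validate them client-side without installing.
    helm template step-certificates ./step-ca-chart | kubectl apply --dry-run=client -f -
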
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/configmaps.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/configmaps.yaml
new file mode 100644
index 0000000..1670d9a
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/configmaps.yaml
@@ -0,0 +1,167 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+# ConfigMaps that will be updated by the configuration job:
+# 1. Step CA config directory.
+# 2. Step CA certs directory.
+# 3. Step CA secrets directory.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-certs
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: v1
+data:
+  intermediate_ca_key: ""
+  root_ca_key: ""
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-secrets
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-bootstrap
+  namespace: {{.Release.Namespace}}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+data:
+  bootstrap.sh: |-
+    #!/bin/sh
+    STEPPATH=/home/step
+    echo -e "\e[1mWelcome to Step Certificates configuration.\e[0m\n"
+
+    function permission_error () {
+      echo -e "\033[0;31mPERMISSION ERROR:\033[0m $1\n"
+      exit 1
+    }
+
+    function kbreplace() {
+      kubectl $@ -o yaml --dry-run | kubectl replace -f -
+    }
+
+    echo -e "\e[1mConfiguring kubectl with service account...\e[0m"
+    # Use the service account context
+    kubectl config set-cluster cfc --server=https://kubernetes.default --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    kubectl config set-credentials bootstrap --token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
+    kubectl config set-context cfc --cluster=cfc --user=bootstrap
+    kubectl config use-context cfc
+
+    echo -e "\n\e[1mChecking cluster permissions...\e[0m"
+    echo -n "Checking for permission to create configmaps in {{.Release.Namespace}} namespace: "
+    kubectl auth can-i create configmaps --namespace {{.Release.Namespace}}
+    if [ $? -ne 0 ]; then
+      permission_error "create configmaps"
+    fi
+
+    echo -n "Checking for permission to create secrets in {{.Release.Namespace}} namespace: "
+    kubectl auth can-i create secrets --namespace {{.Release.Namespace}}
+    if [ $? -ne 0 ]; then
+      permission_error "create secrets"
+    fi
+{{ if .Values.autocert.enabled }}
+    echo -n "Checking for permission to create mutatingwebhookconfiguration in {{.Release.Namespace}} namespace: "
+    kubectl auth can-i create mutatingwebhookconfiguration --namespace {{.Release.Namespace}}
+    if [ $? -ne 0 ]; then
+      permission_error "create mutatingwebhookconfiguration"
+    fi
+{{- end }}
+
+    # Setting this here on purpose, after the above section which explicitly checks
+    # for and handles exit errors.
+    set -e
+
+    echo -e "\n\e[1mInitializing the CA...\e[0m"
+
+    # CA password
+{{- if .Values.ca.password }}
+    CA_PASSWORD={{ quote .Values.ca.password }}
+{{- else }}
+    CA_PASSWORD=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32 ; echo '')
+{{- end }}
+    # Provisioner password
+{{- if .Values.ca.provisioner.password }}
+    CA_PROVISIONER_PASSWORD={{ quote .Values.ca.provisioner.password }}
+{{- else }}
+    CA_PROVISIONER_PASSWORD=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32 ; echo '')
+{{- end }}
+
+    TMP_CA_PASSWORD=$(mktemp /tmp/autocert.XXXXXX)
+    TMP_CA_PROVISIONER_PASSWORD=$(mktemp /tmp/autocert.XXXXXX)
+
+    echo $CA_PASSWORD > $TMP_CA_PASSWORD
+    echo $CA_PROVISIONER_PASSWORD > $TMP_CA_PROVISIONER_PASSWORD
+
+    step ca init \
+      --name "{{.Values.ca.name}}" \
+      --dns "{{include "step-certificates.dns" .}}" \
+      --address "{{.Values.ca.address}}" \
+      --provisioner "{{.Values.ca.provisioner.name}}" \
+      --with-ca-url "{{include "step-certificates.url" .}}" \
+      --password-file "$TMP_CA_PASSWORD" \
+      --provisioner-password-file "$TMP_CA_PROVISIONER_PASSWORD" {{ if not .Values.ca.db.enabled }}--no-db{{ end }}
+
+    rm -f $TMP_CA_PASSWORD $TMP_CA_PROVISIONER_PASSWORD
+
+    echo -e "\n\e[1mCreating configmaps and secrets in {{.Release.Namespace}} namespace ...\e[0m"
+
+    # Replace secrets created on helm install
+    # It allows to properly remove them on helm delete
+    kbreplace -n {{.Release.Namespace}} create configmap {{ include "step-certificates.fullname" . }}-config --from-file $(step path)/config
+    kbreplace -n {{.Release.Namespace}} create configmap {{ include "step-certificates.fullname" . }}-certs --from-file $(step path)/certs
+    kbreplace -n {{.Release.Namespace}} create configmap {{ include "step-certificates.fullname" . }}-secrets --from-file $(step path)/secrets
+
+    kbreplace -n {{.Release.Namespace}} create secret generic {{ include "step-certificates.fullname" . }}-ca-password --from-literal "password=${CA_PASSWORD}"
+    kbreplace -n {{.Release.Namespace}} create secret generic {{ include "step-certificates.fullname" . }}-provisioner-password --from-literal "password=${CA_PROVISIONER_PASSWORD}"
+
+    # Label all configmaps and secrets
+    kubectl -n {{.Release.Namespace}} label configmap {{ include "step-certificates.fullname" . }}-config {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label configmap {{ include "step-certificates.fullname" . }}-certs {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label configmap {{ include "step-certificates.fullname" . }}-secrets {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label secret {{ include "step-certificates.fullname" . }}-ca-password {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+    kubectl -n {{.Release.Namespace}} label secret {{ include "step-certificates.fullname" . }}-provisioner-password {{ include "step-certificates.labels" . | replace ": " "=" | replace "\n" " " }}
+
+    # Patch webhook if autocert is enabled
+{{ if .Values.autocert.enabled }}
+    CA_BUNDLE=$(cat $(step path)/certs/root_ca.crt | base64 | tr -d '\n')
+    kubectl patch mutatingwebhookconfigurations {{ .Release.Name }}-autocert-webhook-config \
+      --type json -p="[{\"op\":\"replace\",\"path\":\"/webhooks/0/clientConfig/caBundle\",\"value\":\"$CA_BUNDLE\"}]"
+{{- end }}
+
+    echo -e "\n\e[1mStep Certificates installed!\e[0m"
+    echo
+    echo "CA URL: {{include "step-certificates.url" .}}"
+    echo "CA Fingerprint: $(step certificate fingerprint $(step path)/certs/root_ca.crt)"
+    echo
\ No newline at end of file
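
The kbreplace helper is the key trick in this bootstrap script: kubectl create with -o yaml --dry-run builds the object without persisting it, and piping that into kubectl replace overwrites the empty placeholder ConfigMaps and Secrets the chart pre-creates, which is what lets helm delete clean them up later. Expanded for the config map it is roughly equivalent to (names and namespace are illustrative):

    kubectl -n dlab create configmap step-certificates-config \
      --from-file $(step path)/config -o yaml --dry-run | kubectl replace -f -
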
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/ingress.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/ingress.yaml
new file mode 100644
index 0000000..240bdaf
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/ingress.yaml
@@ -0,0 +1,57 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "step-certificates.fullname" . -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+  tls:
+  {{- range .Values.ingress.tls }}
+    - hosts:
+      {{- range .hosts }}
+        - {{ . | quote }}
+      {{- end }}
+      secretName: {{ .secretName }}
+  {{- end }}
+{{- end }}
+  rules:
+  {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+        {{- range .paths }}
+          - path: {{ . }}
+            backend:
+              serviceName: {{ $fullName }}
+              servicePort: http
+        {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/rbac.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/rbac.yaml
new file mode 100644
index 0000000..0534856
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/rbac.yaml
@@ -0,0 +1,93 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+{{- if .Release.IsInstall -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+rules:
+- apiGroups: [""]
+  resources: ["secrets", "configmaps"]
+  verbs: ["get", "create", "update", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+roleRef:
+  kind: Role
+  name: {{ include "step-certificates.fullname" . }}-config
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+rules:
+- apiGroups: ["admissionregistration.k8s.io"]
+  resources: ["mutatingwebhookconfigurations"]
+  verbs: ["get", "create", "update", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+  labels:
+    helm.sh/chart: {{ include "step-certificates.chart" . }}
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "step-certificates.fullname" . }}-config
+  namespace: {{.Release.Namespace}}
+roleRef:
+  kind: ClusterRole
+  name: {{ include "step-certificates.fullname" . }}-config
+  apiGroup: rbac.authorization.k8s.io
+{{- end -}}
\ No newline at end of file
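
A quick way to confirm this Role actually grants what bootstrap.sh checks for is to impersonate the job's ServiceAccount (a sketch; the "dlab" namespace and the step-certificates fullname are assumptions):

    kubectl -n dlab auth can-i create configmaps \
      --as system:serviceaccount:dlab:step-certificates-config
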
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/secrets.yaml
similarity index 67%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/secrets.yaml
index 951fdd7..68d0b8d 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/secrets.yaml
@@ -19,22 +19,21 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Secrets that will be updated by the configuration job:
+# 1. CA keys password.
+# 2. Provisioner password.
+apiVersion: v1
+data:
+  password: ""
+kind: Secret
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-ca-password
+  namespace: {{.Release.Namespace}}
+---
+apiVersion: v1
+data:
+  password: ""
+kind: Secret
+metadata:
+  name: {{ include "step-certificates.fullname" . }}-provisioner-password
+  namespace: {{.Release.Namespace}}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/service.yaml
similarity index 63%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/service.yaml
index 951fdd7..dccae38 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/service.yaml
@@ -19,22 +19,22 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "step-certificates.fullname" . }}
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: {{ .Values.service.targetPort }}
+      {{- if .Values.service.nodePort }}
+      nodePort: {{ .Values.service.nodePort }}
+      {{- end }}
+      protocol: TCP
+      name: https
+  selector:
+    app.kubernetes.io/name: {{ include "step-certificates.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
\ No newline at end of file
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/tests/test-connection.yaml
similarity index 69%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/tests/test-connection.yaml
index 951fdd7..4fe296d 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/templates/tests/test-connection.yaml
@@ -19,22 +19,18 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "step-certificates.fullname" . }}-test-connection"
+  labels:
+{{ include "step-certificates.labels" . | indent 4 }}
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args:  ['{{ include "step-certificates.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
\ No newline at end of file
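
Once the release is installed, the helm.sh/hook annotation above makes this pod runnable on demand (release name is an assumption):

    # Run the chart's connectivity test against the CA service.
    helm test step-certificates
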
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/values.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/values.yaml
new file mode 100644
index 0000000..269e7fa
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-chart/values.yaml
@@ -0,0 +1,125 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+# Default values for step-certificates.
+
+# replicaCount is the number of replicas of step-certificates.
+# Only one replica is supported at this time.
+replicaCount: 1
+
+# nameOverride overrides the name of the chart.
+nameOverride: ""
+# fullnameOverride overrides the full name of the chart.
+fullnameOverride: ""
+
+# image contains the docker image for step-certificates.
+image:
+  repository: smallstep/step-ca
+  tag: 0.13.2
+  pullPolicy: IfNotPresent
+
+# bootstrapImage contains the docker image for the bootstrap of the configuration.
+bootstrapImage:
+  repository: smallstep/step-ca-bootstrap
+  tag: latest
+  pullPolicy: IfNotPresent
+
+# service contains configuration for the kubernetes service.
+service:
+  type: ClusterIP
+  port: 443
+  targetPort: 9000
+
+# ca contains the certificate authority configuration.
+ca:
+  # name is the name of the new public key infrastructure (PKI).
+  name: dlab-step-ca
+  # address is the HTTP listener address of step-certificates.
+  address: :9000
+  # dns is the comma-separated DNS names to use. Leave it empty to use the format:
+  # {include "step-certificates.fullname" .}.{ .Release.Namespace}.svc.cluster.local,127.0.0.1
+  dns: ${step_chart_name}.${namespace}.svc.cluster.local,${step_ca_host}
+  # ${step_ca_host}
+  # url is the HTTP URL that step-certificates will listen at. Leave it empty to use the format
+  # https://{{ include "step-certificates.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+  url: https://${step_chart_name}.${namespace}.svc.cluster.local
+  #${step_ca_host}
+  # password is the password used to encrypt the keys. Leave it empty to generate a random one.
+  password: ${step_ca_password}
+  # provisioner contains the step-certificates provisioner configuration.
+  provisioner:
+    # name is the new provisioner name.
+    name: admin
+    # password is the password used to encrypt the provisioner private key.
+    password: ${step_ca_provisioner_password}
+  # db contains the step-certificates database configuration.
+  db:
+    # enabled defines if the database is enabled.
+    enabled: true
+    # persistent defines if a Persistent Volume Claim is used; if false, an emptyDir is used.
+    persistent: true
+    # storageClass is the Persistent Volume Storage Class.
+    # If defined, storageClassName: <storageClass>.
+    # If set to "-", storageClassName: "", which disables dynamic provisioning.
+    # If undefined or set to null, no storageClassName spec is set, choosing the
+    # default provisioner (gp2 on AWS, standard on GKE, AWS & OpenStack).
+    storageClass: standard
+    # accessModes defines the Persistent Volume Access Mode.
+    accessModes:
+      - ReadWriteOnce
+    # size is the Persistent Volume size.
+    size: 10Gi
+  # runAsRoot runs the ca as root instead of the step user. This is required in
+  # some storage provisioners.
+  runAsRoot: false
+
+# autocert is used to configure the autocert chart that depends on step-certificates.
+autocert:
+  enabled: false
+
+# ingress contains the configuration for an ingress controller.
+ingress:
+  enabled: false
+  annotations: {}
+  hosts: []
+  tls: []
+
+# resources contains the CPU/memory resource requests/limits.
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+# nodeSelector contains the node labels for pod assignment.
+nodeSelector: {}
+
+# tolerations contains the toleration labels for pod assignment.
+tolerations: []
+
+# affinity contains the affinity settings for pod assignment.
+affinity: {}
\ No newline at end of file
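
Note that this values.yaml does double duty: Terraform's template_file renders the ${...} placeholders before Helm ever parses the file, while everything else passes through untouched. After an apply, the rendered result can be inspected from state (a sketch; assumes Terraform 0.12-style syntax and an initialized working directory):

    # Print the values file exactly as it was handed to helm_release.step_ca.
    echo 'data.template_file.step_ca_values.rendered' | terraform console
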
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/Chart.yaml
index 16da950..cbb683a 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: step-ca-issuer
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..66e3377
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "step-ca-issuer.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "step-ca-issuer.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "step-ca-issuer.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "step-ca-issuer.labels" -}}
+app.kubernetes.io/name: {{ include "step-ca-issuer.name" . }}
+helm.sh/chart: {{ include "step-ca-issuer.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/stepissuer.yaml
similarity index 71%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/stepissuer.yaml
index 951fdd7..caeeb92 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/templates/stepissuer.yaml
@@ -1,3 +1,4 @@
+{{- /*
 # *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -18,23 +19,19 @@
 # under the License.
 #
 # ******************************************************************************
+*/ -}}
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+apiVersion: certmanager.step.sm/v1beta1
+kind: StepIssuer
+metadata:
+  name: step-issuer
+  namespace: {{ .Values.namespace }}
+spec:
+  url: {{ .Values.ca_url }}
+  caBundle: {{ .Values.ca_bundle }}
+  provisioner:
+    name: {{ .Values.kid_name }}
+    kid: {{ .Values.kid }}
+    passwordRef:
+      name: step-certificates-provisioner-password
+      key: password
\ No newline at end of file
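
After cert-manager and the step-issuer controller are running, the resource above can be inspected like any other CRD instance to confirm it reached a ready state (a sketch; the "dlab" namespace is an assumption):

    kubectl -n dlab describe stepissuer step-issuer
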
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/values.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/values.yaml
index 16da950..0cb4b94 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca-issuer-chart/values.yaml
@@ -19,19 +19,14 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+replicaCount: 1
 
+ingress:
+  enabled: false
+labels: {}
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+ca_url: ${step_ca_url}
+ca_bundle: ${step_ca_bundle}
+namespace: ${namespace}
+kid_name: ${step_ca_kid_name}
+kid: ${step_ca_kid}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca.tf
new file mode 100644
index 0000000..c920367
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-ca.tf
@@ -0,0 +1,78 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  step_ca_name = "step-certificates"
+}
+
+resource "kubernetes_service" "step_service_lb" {
+
+  depends_on = [null_resource.cert_manager_delay]
+  metadata {
+    name      = "step-certs"
+    namespace = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+  spec {
+    selector = {
+      "app" = local.step_ca_name
+    }
+    session_affinity = "ClientIP"
+    port {
+      port        = 443
+      target_port = 9000
+    }
+
+    type = "LoadBalancer"
+  }
+}
+
+data "template_file" "step_ca_values" {
+  template = file("./modules/helm_charts/step-ca-chart/values.yaml")
+  vars     = {
+    step_ca_password             = random_string.step_ca_password.result
+    step_ca_provisioner_password = random_string.step_ca_provisioner_password.result
+    step_ca_host                 = kubernetes_service.step_service_lb.load_balancer_ingress.0.ip
+    step_chart_name              = local.step_ca_name
+    namespace                    = kubernetes_namespace.dlab-namespace.metadata[0].name
+  }
+}
+
+resource "helm_release" "step_ca" {
+  name       = local.step_ca_name
+  chart      = "./modules/helm_charts/step-ca-chart"
+  namespace  = kubernetes_namespace.dlab-namespace.metadata[0].name
+  # depends_on = [kubernetes_service.step_service_lb]
+  wait       = false
+  timeout    = 600
+
+  values     = [
+    data.template_file.step_ca_values.rendered
+  ]
+}
+
+resource "null_resource" "step_ca_delay" {
+  provisioner "local-exec" {
+    command = "sleep 120"
+  }
+  triggers = {
+    "before" = helm_release.step_ca.name
+  }
+}
\ No newline at end of file
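
The ordering here is implicit but deliberate: data.template_file.step_ca_values reads the LoadBalancer IP from kubernetes_service.step_service_lb, and helm_release.step_ca consumes the rendered values, so Terraform sequences all three without the commented-out depends_on. A post-apply sanity check (a sketch; service and release names come from the resources above, while the "dlab" namespace and kube context are assumptions):

    # Confirm the CA LoadBalancer got an external IP and the release landed.
    kubectl -n dlab get svc step-certs -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
    helm -n dlab status step-certificates
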
diff --git a/services/self-service/Dockerfile_aws b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/.helmignore
similarity index 72%
copy from services/self-service/Dockerfile_aws
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/.helmignore
index 951fdd7..4976779 100644
--- a/services/self-service/Dockerfile_aws
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/.helmignore
@@ -19,22 +19,25 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN apk add --update \
-    python \
-    python-dev \
-    py-pip \
-    build-base \
-    && pip install awscli --upgrade \
-    && apk --purge -v del py-pip \
-    && rm -rf /var/cache/apk/*
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
-
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/Chart.yaml
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/Chart.yaml
index 16da950..832b44c 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/Chart.yaml
@@ -19,19 +19,8 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
-
-
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: step-issuer
+version: 0.1.0
diff --git a/services/self-service/Dockerfile_gcp b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/NOTES.txt
similarity index 74%
copy from services/self-service/Dockerfile_gcp
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/NOTES.txt
index 16da950..58e9f20 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/NOTES.txt
@@ -19,19 +19,9 @@
 #
 # ******************************************************************************
 
-FROM openjdk:8-alpine
+Your release is named {{ .Release.Name }}.
 
+To learn more about the release, try:
 
-USER root
-
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
-
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+  $ helm status {{ .Release.Name }}
+  $ helm get {{ .Release.Name }}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/_helpers.tpl b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/_helpers.tpl
new file mode 100644
index 0000000..9cd3910
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/_helpers.tpl
@@ -0,0 +1,65 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "step-issuer.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If the release name contains the chart name, it will be used as the full name.
+*/}}
+{{- define "step-issuer.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "step-issuer.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "step-issuer.labels" -}}
+app.kubernetes.io/name: {{ include "step-issuer.name" . }}
+helm.sh/chart: {{ include "step-issuer.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/deployment.yaml b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/deployment.yaml
new file mode 100644
index 0000000..c010d77
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/templates/deployment.yaml
@@ -0,0 +1,360 @@
+{{- /*
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+*/ -}}
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: step-issuer-system
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: stepissuers.certmanager.step.sm
+spec:
+  group: certmanager.step.sm
+  names:
+    kind: StepIssuer
+    plural: stepissuers
+  scope: ""
+  validation:
+    openAPIV3Schema:
+      description: StepIssuer is the Schema for the stepissuers API
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          description: StepIssuerSpec defines the desired state of StepIssuer
+          properties:
+            caBundle:
+              description: CABundle is a base64-encoded TLS certificate used to verify
+                connections to the step certificates server. If not set, the system
+                root certificates are used to validate the TLS connection.
+              format: byte
+              type: string
+            provisioner:
+              description: Provisioner contains the step certificates provisioner
+                configuration.
+              properties:
+                kid:
+                  description: KeyID is the kid property of the JWK provisioner.
+                  type: string
+                name:
+                  description: Name is the name of the JWK provisioner.
+                  type: string
+                passwordRef:
+                  description: PasswordRef is a reference to a Secret containing the
+                    provisioner password used to decrypt the provisioner private key.
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must be a
+                        valid secret key.
+                      type: string
+                    name:
+                      description: The name of the secret in the pod's namespace to
+                        select from.
+                      type: string
+                  required:
+                  - name
+                  type: object
+              required:
+              - kid
+              - name
+              - passwordRef
+              type: object
+            url:
+              description: URL is the base URL for the step certificates instance.
+              type: string
+          required:
+          - provisioner
+          - url
+          type: object
+        status:
+          description: StepIssuerStatus defines the observed state of StepIssuer
+          properties:
+            conditions:
+              items:
+                description: StepIssuerCondition contains condition information for
+                  the step issuer.
+                properties:
+                  lastTransitionTime:
+                    description: LastTransitionTime is the timestamp corresponding
+                      to the last status change of this condition.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message is a human readable description of the details
+                      of the last transition, complementing reason.
+                    type: string
+                  reason:
+                    description: Reason is a brief machine readable explanation for
+                      the condition's last transition.
+                    type: string
+                  status:
+                    description: Status of the condition, one of ('True', 'False',
+                      'Unknown').
+                    enum:
+                    - "True"
+                    - "False"
+                    - Unknown
+                    type: string
+                  type:
+                    description: Type of the condition, currently ('Ready').
+                    enum:
+                    - Ready
+                    type: string
+                required:
+                - status
+                - type
+                type: object
+              type: array
+          type: object
+      type: object
+  versions:
+  - name: v1beta1
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
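+# A StepIssuer that satisfies the schema above would look roughly like the
+# following (illustrative values only; the provisioner name, kid and secret
+# name are placeholders, not taken from this chart):
+#
+#   apiVersion: certmanager.step.sm/v1beta1
+#   kind: StepIssuer
+#   metadata:
+#     name: step-issuer
+#     namespace: step-issuer-system
+#   spec:
+#     url: https://step-ca.example.com
+#     caBundle: <base64-encoded root CA certificate>
+#     provisioner:
+#       name: admin
+#       kid: <JWK key id>
+#       passwordRef:
+#         name: step-ca-provisioner-password
+#         key: password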
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: step-issuer-leader-election-role
+  namespace: step-issuer-system
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps/status
+  verbs:
+  - get
+  - update
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: step-issuer-manager-role
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - certmanager.k8s.io
+  resources:
+  - certificaterequests
+  verbs:
+  - get
+  - list
+  - update
+  - watch
+- apiGroups:
+  - certmanager.k8s.io
+  resources:
+  - certificaterequests/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - certmanager.step.sm
+  resources:
+  - stepissuers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - certmanager.step.sm
+  resources:
+  - stepissuers/status
+  verbs:
+  - get
+  - patch
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: step-issuer-proxy-role
+rules:
+- apiGroups:
+  - authentication.k8s.io
+  resources:
+  - tokenreviews
+  verbs:
+  - create
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - subjectaccessreviews
+  verbs:
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: step-issuer-leader-election-rolebinding
+  namespace: step-issuer-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: step-issuer-leader-election-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: step-issuer-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: step-issuer-manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: step-issuer-manager-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: step-issuer-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: step-issuer-proxy-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: step-issuer-proxy-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: step-issuer-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "8443"
+    prometheus.io/scheme: https
+    prometheus.io/scrape: "true"
+  labels:
+    control-plane: controller-manager
+  name: step-issuer-controller-manager-metrics-service
+  namespace: step-issuer-system
+spec:
+  ports:
+  - name: https
+    port: 8443
+    targetPort: https
+  selector:
+    control-plane: controller-manager
+---
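+# The Deployment below runs two containers: kube-rbac-proxy exposes the
+# manager's local metrics endpoint (127.0.0.1:8080) over authenticated HTTPS
+# on port 8443 (the port scraped via the metrics Service above), while the
+# manager container runs the StepIssuer controller itself.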
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: step-issuer-controller-manager
+  namespace: step-issuer-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      control-plane: controller-manager
+  template:
+    metadata:
+      labels:
+        control-plane: controller-manager
+    spec:
+      containers:
+      - args:
+        - --secure-listen-address=0.0.0.0:8443
+        - --upstream=http://127.0.0.1:8080/
+        - --logtostderr=true
+        - --v=10
+        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
+        name: kube-rbac-proxy
+        ports:
+        - containerPort: 8443
+          name: https
+      - args:
+        - --metrics-addr=127.0.0.1:8080
+        - --enable-leader-election
+        command:
+        - /manager
+        image: smallstep/step-issuer:0.1.0
+        name: manager
+        resources:
+          limits:
+            cpu: 100m
+            memory: 30Mi
+          requests:
+            cpu: 100m
+            memory: 20Mi
+      terminationGracePeriodSeconds: 10
\ No newline at end of file
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/values.yaml
similarity index 77%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/values.yaml
index d0cfc24..0c6d2cf 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer-chart/values.yaml
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,10 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
+replicaCount: 1
+
+ingress:
+  enabled: false
+labels: {}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer.tf
new file mode 100644
index 0000000..2cbb247
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/step-issuer.tf
@@ -0,0 +1,81 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+data "template_file" "step_issuer_values" {
+  template = file("./modules/helm_charts/step-issuer-chart/values.yaml")
+}
+
+resource "helm_release" "step-issuer" {
+    name       = "step-issuer"
+    chart      = "./modules/helm_charts/step-issuer-chart"
+    wait       = true
+    depends_on = [null_resource.step_ca_delay]
+
+    values     = [
+        data.template_file.step_issuer_values.rendered
+    ]
+}
+
+resource "null_resource" "step_issuer_delay" {
+  provisioner "local-exec" {
+    command = "sleep 120"
+  }
+  triggers = {
+    "before" = helm_release.step-issuer.name
+  }
+}
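+
+# The fixed sleeps in these null_resources only approximate readiness: they
+# give the step-issuer controller time to register its CRDs and webhook before
+# the dependent release installs. Polling for readiness would be more robust;
+# a minimal sketch, using the deployment name from the chart above:
+#
+#   kubectl -n step-issuer-system rollout status \
+#     deployment/step-issuer-controller-manager --timeout=180s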
+
+data "template_file" "step_ca_issuer_values" {
+  template = file("./modules/helm_charts/step-ca-issuer-chart/values.yaml")
+  vars     = {
+    step_ca_url      = "https://${kubernetes_service.step_service_lb.load_balancer_ingress.0.ip}"
+    step_ca_bundle   = lookup(data.external.step-ca-config-values.result, "rootCa")
+    namespace        = kubernetes_namespace.dlab-namespace.metadata[0].name
+    step_ca_kid_name = lookup(data.external.step-ca-config-values.result, "kidName")
+    step_ca_kid      = lookup(data.external.step-ca-config-values.result, "kid")
+  }
+}
+
+resource "helm_release" "step-ca-issuer" {
+    name       = "step-ca-issuer"
+    chart      = "./modules/helm_charts/step-ca-issuer-chart"
+    wait       = true
+    depends_on = [null_resource.step_issuer_delay]
+
+    values     = [
+        data.template_file.step_ca_issuer_values.rendered
+    ]
+}
+
+resource "null_resource" "step_ca_issuer_delay" {
+  provisioner "local-exec" {
+    command = "sleep 60"
+  }
+  triggers = {
+    "before" = helm_release.step-ca-issuer.name
+  }
+}
+
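+# Terraform's external data source protocol requires the program to print a
+# single JSON object of string values on stdout. The lookups above depend on
+# the keys "rootCa", "kidName" and "kid"; the expected shape is therefore
+# something like (illustrative values):
+#
+#   {"rootCa": "<base64 CA bundle>", "kidName": "<provisioner name>", "kid": "<JWK key id>"}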
+data "external" "step-ca-config-values" {
+  program     = ["sh", "./modules/helm_charts/files/get_configmap_values.sh", var.credentials_file_path,
+                 var.gke_cluster_name, var.region, var.project_id]
+  depends_on  = [null_resource.step_issuer_delay]
+}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/variables.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/variables.tf
new file mode 100644
index 0000000..be4f82c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/helm_charts/variables.tf
@@ -0,0 +1,88 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "namespace_name" {}
+
+variable "mongo_dbname" {}
+
+variable "mongo_db_username" {}
+
+variable "mongo_service_port" {}
+
+variable "mongo_service_name" {}
+
+variable "ssn_k8s_alb_dns_name" {}
+
+variable "service_base_name" {}
+
+variable "ldap_host" {}
+
+variable "ldap_dn" {}
+
+variable "ldap_users_group" {}
+
+variable "ldap_user" {}
+
+variable "ldap_bind_creds" {}
+
+variable "keycloak_user" {}
+
+variable "ldap_usernameAttr" {}
+
+variable "ldap_rdnAttr" {}
+
+variable "ldap_uuidAttr" {}
+
+variable "mysql_db_name" {}
+
+variable "mysql_user" {}
+
+variable "region" {}
+
+variable "mongo_image_tag" {}
+
+variable "mongo_node_port" {}
+
+variable "gke_cluster_name" {}
+
+variable "big_query_dataset" {}
+
+variable "env_os" {}
+
+variable "credentials_file_path" {}
+
+variable "project_id" {}
+
+variable "custom_certs_enabled" {}
+
+variable "custom_cert_path" {}
+
+variable "custom_key_path" {}
+
+variable "custom_certs_host" {}
+
+variable "mysql_disk_size" {}
+
+variable "domain" {}
+
+variable "keycloak_realm_name" {}
+
+variable "keycloak_client_id" {}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/outputs.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/outputs.tf
new file mode 100644
index 0000000..1a2028b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/outputs.tf
@@ -0,0 +1,76 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+output "service_base_name" {
+  value = var.service_base_name
+}
+
+output "vpc_id" {
+  value = module.gke_cluster.vpc_name
+}
+
+output "subnet_id" {
+  value = module.gke_cluster.subnet_name
+}
+
+output "keycloak_client_secret" {
+    value = module.helm_charts.keycloak_client_secret
+}
+
+output "keycloak_client_id" {
+    value = var.keycloak_client_id
+}
+
+output "ssn_ui_host" {
+    value = module.helm_charts.ssn_ui_host
+}
+
+output "step_root_ca" {
+    value = module.helm_charts.step_root_ca
+}
+
+output "step_kid" {
+    value = module.helm_charts.step_kid
+}
+
+output "step_kid_password" {
+    value = module.helm_charts.step_kid_password
+}
+
+output "step_ca_url" {
+    value = module.helm_charts.step_ca_url
+}
+
+output "keycloak_auth_server_url" {
+    value = "https://${module.helm_charts.ssn_ui_host}/auth"
+}
+
+output "keycloak_realm_name" {
+    value = var.keycloak_realm_name
+}
+
+output "keycloak_user_name" {
+    value = var.keycloak_user
+}
+
+output "keycloak_user_password" {
+    value = module.helm_charts.keycloak_user_password
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/variables.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/variables.tf
new file mode 100644
index 0000000..fbecd7c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/variables.tf
@@ -0,0 +1,204 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+variable "namespace_name" {
+    default = "dlab"
+}
+
+variable "credentials_file_path" {
+  default = ""
+}
+
+variable "project_id" {
+  default = ""
+}
+
+variable "region" {
+  default = "us-west1"
+}
+
+variable "zone" {
+  default = "us-west1-a"
+}
+
+variable "vpc_name" {
+  default = ""
+}
+
+variable "subnet_name" {
+  default = ""
+}
+
+variable "service_base_name" {
+  default = "dlab-k8s"
+}
+
+variable "subnet_cidr" {
+  default = "172.31.0.0/24"
+}
+
+variable "additional_tag" {
+  default = "product:dlab"
+}
+
+variable "ssn_k8s_workers_count" {
+  default = 1
+}
+
+variable "gke_cluster_version" {
+  default = "1.14.8-gke.12"
+}
+
+// Couldn't assign in GCP
+//variable "tag_resource_id" {
+//  default = "user:tag"
+//}
+
+variable "ssn_k8s_workers_shape" {
+  default = "n1-standard-2"
+}
+
+variable "service_account_iam_roles" {
+  default = [
+    "roles/logging.logWriter",
+    "roles/monitoring.metricWriter",
+    "roles/monitoring.viewer",
+    "roles/storage.objectViewer",
+    "roles/iam.serviceAccountTokenCreator",
+    "roles/iam.serviceAccountKeyAdmin",
+    "roles/dns.admin"
+  ]
+}
+
+variable "ssn_k8s_alb_dns_name" {
+    default = ""
+}
+
+variable "keycloak_user" {
+    default = "dlab-admin"
+}
+
+variable "mysql_user" {
+    default = "keycloak"
+}
+
+variable "mysql_db_name" {
+    default = "keycloak"
+}
+
+variable "ldap_usernameAttr" {
+    default = "uid"
+}
+
+variable "ldap_rdnAttr" {
+    default = "uid"
+}
+
+variable "ldap_uuidAttr" {
+    default = "uid"
+}
+
+variable "ldap_users_group" {
+    default = "ou=People"
+}
+
+variable "ldap_dn" {
+    default = "dc=example,dc=com"
+}
+
+variable "ldap_user" {
+    default = "cn=admin"
+}
+
+variable "ldap_bind_creds" {
+    default = ""
+}
+
+variable "ldap_host" {
+    default = ""
+}
+
+variable "mongo_db_username" {
+    default = "admin"
+}
+
+variable "mongo_dbname" {
+    default = "dlabdb"
+}
+
+variable "mongo_image_tag" {
+    default = "4.0.10-debian-9-r13"
+    description = "MongoDB Image tag"
+}
+
+variable "mongo_service_port" {
+    default = "27017"
+}
+
+variable "mongo_node_port" {
+    default = "31017"
+}
+
+variable "mongo_service_name" {
+    default = "mongo-ha-mongodb"
+}
+
+# variable "endpoint_eip_address" {}
+
+variable "env_os" {
+  default = "debian"
+}
+
+variable "big_query_dataset" {
+  default = ""
+}
+
+variable "custom_certs_enabled" {
+    default = "False"
+}
+
+variable "custom_cert_path" {
+    default = ""
+}
+
+variable "custom_key_path" {
+    default = ""
+}
+
+variable "custom_certs_host" {
+    default = ""
+}
+
+variable "mysql_disk_size" {
+    default = "10"
+}
+
+variable "domain" {
+  default = ""
+}
+
+variable "keycloak_realm_name" {
+  default = "dlab"
+}
+
+variable "keycloak_client_id" {
+  default = "dlab-ui"
+}
diff --git a/integration-tests-cucumber/src/test/resources/config.properties b/infrastructure-provisioning/terraform/keycloak-theme/Dockerfile
similarity index 79%
copy from integration-tests-cucumber/src/test/resources/config.properties
copy to infrastructure-provisioning/terraform/keycloak-theme/Dockerfile
index d0cfc24..6e29689 100644
--- a/integration-tests-cucumber/src/test/resources/config.properties
+++ b/infrastructure-provisioning/terraform/keycloak-theme/Dockerfile
@@ -1,3 +1,4 @@
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -16,5 +17,7 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-mongo.connection.string=mongodb://localhost:27017/DLAB
-mongo.db.name=DLAB
\ No newline at end of file
+# ******************************************************************************
+
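+# This image only carries the "dlab" Keycloak login theme. Presumably it is
+# consumed by copying /dlab into Keycloak's themes directory (for example via
+# an init container); that wiring is an assumption, as it is not shown here.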
+FROM busybox
+COPY dlab /dlab
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/css/login.css b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/css/login.css
new file mode 100644
index 0000000..1f5d717
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/css/login.css
@@ -0,0 +1,499 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+.login-pf body {
+    background: url("../img/login-background.png") no-repeat center center fixed;
+    background-size: cover;
+    padding-top: 300px;
+    height: 100vh;
+}
+
+.alert-error {
+    background-color: #ffffff;
+    border-color: #cc0000;
+    color: #333333;
+}
+
+#kc-locale ul {
+    display: none;
+    position: absolute;
+    background-color: #fff;
+    list-style: none;
+    right: 0;
+    top: 20px;
+    min-width: 100px;
+    padding: 2px 0;
+    border: solid 1px #bbb;
+}
+
+#kc-locale:hover ul {
+    display: block;
+    margin: 0;
+}
+
+#kc-locale ul li a {
+    display: block;
+    padding: 5px 14px;
+    color: #000 !important;
+    text-decoration: none;
+    line-height: 20px;
+}
+
+#kc-locale ul li a:hover {
+    color: #4d5258;
+    background-color: #d4edfa;
+}
+
+#kc-locale-dropdown a {
+    color: #4d5258;
+    background: 0 0;
+    padding: 0 15px 0 0;
+    font-weight: 300;
+}
+
+#kc-locale-dropdown a:hover {
+    text-decoration: none;
+}
+
+a#kc-current-locale-link {
+    display: block;
+    padding: 0 5px;
+}
+
+a#kc-current-locale-link::after {
+    content: "\2c5";
+    margin-left: 4px;
+}
+
+.login-pf .container {
+    padding-top: 60px;
+}
+
+.login-pf a:hover {
+    color: #0099d3;
+}
+
+#kc-logo {
+    width: 100%;
+}
+
+#kc-logo-wrapper {
+    background-image: url(../img/keycloak-logo-2.png);
+    background-color: red;
+    background-repeat: no-repeat;
+    background-size: contain;
+    width: 340px;
+    height: 240px;
+    margin: 62px auto 0;
+}
+
+div.kc-logo-text {
+    width: 500px;
+    margin: 0 auto;
+}
+
+div.kc-logo-text span {
+    display: block;
+    overflow: hidden;
+    text-overflow: ellipsis;
+}
+
+#kc-header {
+    color: #ededed;
+    overflow: visible;
+    white-space: nowrap;
+}
+
+#kc-header-wrapper {
+    font-size: 29px;
+    text-transform: uppercase;
+    letter-spacing: 3px;
+    line-height: 1.2em;
+    white-space: normal;
+}
+
+#kc-content {
+    width: 100%;
+}
+
+#kc-info {
+    padding-bottom: 200px;
+    margin-bottom: -200px;
+}
+
+#kc-info-wrapper {
+    font-size: 13px;
+}
+
+#kc-form-options span {
+    display: block;
+}
+
+#kc-form-options .checkbox {
+    margin-top: 0;
+    color: #72767b;
+}
+
+#kc-terms-text {
+    margin-bottom: 20px;
+}
+
+#kc-registration {
+    margin-bottom: 15px;
+}
+
+/* TOTP */
+
+ol#kc-totp-settings {
+    margin: 0;
+    padding-left: 20px;
+}
+
+ul#kc-totp-supported-apps {
+    margin-bottom: 10px;
+}
+
+#kc-totp-secret-qr-code {
+    max-width: 150px;
+    max-height: 150px;
+}
+
+#kc-totp-secret-key {
+    background-color: #fff;
+    color: #333333;
+    font-size: 16px;
+    padding: 10px 0;
+}
+
+/* OAuth */
+
+#kc-oauth h3 {
+    margin-top: 0;
+}
+
+#kc-oauth ul {
+    list-style: none;
+    padding: 0;
+    margin: 0;
+}
+
+#kc-oauth ul li {
+    border-top: 1px solid rgba(255, 255, 255, 0.1);
+    font-size: 12px;
+    padding: 10px 0;
+}
+
+#kc-oauth ul li:first-of-type {
+    border-top: 0;
+}
+
+#kc-oauth .kc-role {
+    display: inline-block;
+    width: 50%;
+}
+
+/* Code */
+#kc-code textarea {
+    width: 100%;
+    height: 8em;
+}
+
+/* Social */
+
+#kc-social-providers ul {
+    padding: 0;
+}
+
+#kc-social-providers li {
+    display: block;
+}
+
+#kc-social-providers li:first-of-type {
+    margin-top: 0;
+}
+
+.zocial,
+a.zocial {
+    width: 100%;
+    font-weight: normal;
+    font-size: 14px;
+    text-shadow: none;
+    border: 0;
+    background: #f5f5f5;
+    color: #72767b;
+    border-radius: 0;
+    white-space: normal;
+}
+
+.zocial:before {
+    border-right: 0;
+    margin-right: 0;
+}
+
+.zocial span:before {
+    padding: 7px 10px;
+    font-size: 14px;
+}
+
+.zocial:hover {
+    background: #ededed !important;
+}
+
+.zocial.facebook,
+.zocial.github,
+.zocial.google,
+.zocial.microsoft,
+.zocial.stackoverflow,
+.zocial.linkedin,
+.zocial.twitter {
+    background-image: none;
+    border: 0;
+    box-shadow: none;
+    text-shadow: none;
+}
+
+/* Copy of the zocial windows classes, used for Microsoft's social provider button */
+.zocial.microsoft:before {
+    content: "\f15d";
+}
+
+.zocial.stackoverflow:before {
+    color: inherit;
+}
+@media (max-width: 1600px) {
+    .login-pf body {
+        background-image: url("../img/login-background1.png");
+    }
+}
+
+@media (min-width: 768px) {
+    #kc-container-wrapper {
+        position: absolute;
+        width: 100%;
+    }
+
+    .login-pf .container {
+        padding-right: 80px;
+    }
+
+    #kc-locale {
+        position: relative;
+        text-align: right;
+        z-index: 9999;
+    }
+}
+
+@media (max-width: 767px) {
+
+    .login-pf body {
+        background: white;
+        padding-top: 50px;
+    }
+
+    #kc-header {
+        padding-left: 15px;
+        padding-right: 15px;
+        float: none;
+    }
+
+    #kc-header-wrapper {
+        font-size: 16px;
+        font-weight: bold;
+        color: #72767b;
+        letter-spacing: 0;
+    }
+
+    div.kc-logo-text {
+        margin: auto;
+        width: 150px;
+        height: 32px;
+        background-size: 100%;
+    }
+
+    #kc-form {
+        float: none;
+    }
+
+    #kc-info-wrapper {
+        border-top: 1px solid rgba(255, 255, 255, 0.1);
+        margin-top: 15px;
+        padding-top: 15px;
+        padding-left: 0px;
+        padding-right: 15px;
+    }
+
+    #kc-social-providers li {
+        display: block;
+        margin-right: 5px;
+    }
+
+    .login-pf .container {
+        padding-top: 15px;
+        padding-bottom: 15px;
+    }
+
+    #kc-locale {
+        position: absolute;
+        width: 200px;
+        top: 20px;
+        right: 20px;
+        text-align: right;
+        z-index: 9999;
+    }
+
+    #kc-logo-wrapper {
+        background-size: 100px 21px;
+        height: 21px;
+        width: 100px;
+        margin: 20px 0 0 20px;
+    }
+
+}
+
+@media (min-height: 646px) {
+    #kc-container-wrapper {
+        bottom: 12%;
+    }
+}
+
+@media (max-height: 645px) {
+    #kc-container-wrapper {
+        padding-top: 50px;
+        top: 20%;
+    }
+}
+
+.card-pf form.form-actions .btn {
+    float: right;
+    margin-left: 10px;
+}
+
+#kc-form-buttons {
+    margin-top: 40px;
+}
+
+.login-pf-page .login-pf-brand {
+    margin-top: 20px;
+    max-width: 360px;
+    width: 40%;
+}
+
+.card-pf {
+    background: #fff;
+    margin: 0 auto;
+    padding: 0 20px;
+    max-width: 500px;
+    border-top: 0;
+    box-shadow: 0 0 0;
+}
+
+/*tablet*/
+@media (max-width: 840px) {
+    .login-pf-page .card-pf {
+        max-width: none;
+        margin-left: 20px;
+        margin-right: 20px;
+        padding: 20px 20px 30px 20px;
+    }
+}
+
+@media (max-width: 767px) {
+    .login-pf-page .card-pf {
+        max-width: none;
+        margin-left: 0;
+        margin-right: 0;
+        padding-top: 0;
+    }
+
+    .card-pf.login-pf-accounts {
+        max-width: none;
+    }
+}
+
+.login-pf-page .login-pf-signup {
+    font-size: 15px;
+    color: #72767b;
+}
+
+#kc-content-wrapper .row {
+    margin-left: 0;
+    margin-right: 0;
+}
+
+@media (min-width: 768px) {
+    .login-pf-page .login-pf-social-section:first-of-type {
+        padding-right: 39px;
+        border-right: 1px solid #d1d1d1;
+        margin-right: -1px;
+    }
+
+    .login-pf-page .login-pf-social-section:last-of-type {
+        padding-left: 40px;
+    }
+    .login-pf-page .login-pf-page-header {
+         margin-bottom: 20px;
+    }
+
+    .login-pf-page .login-pf-social-section .login-pf-social-link:last-of-type {
+        margin-bottom: 0;
+    }
+}
+
+.login-pf-page .login-pf-social-link {
+    margin-bottom: 25px;
+}
+
+.login-pf-page .login-pf-social-link a {
+    padding: 2px 0;
+}
+
+.login-pf-page.login-pf-page-accounts {
+    margin-left: auto;
+    margin-right: auto;
+}
+
+.login-pf-page .btn-primary {
+    margin-top: 0;
+    height: 38px;
+    background-color: #5bc0de;
+    color: #fff;
+    border-color: #46b8da;
+    background-image: none;
+}
+
+.login-pf-page .form-control {
+    width: 100%;
+    height: 36px;
+    padding: 0 10px;
+    border: 1px solid #5bc0de;
+    background: #fafafa;
+    transition: border .1s ease-out;
+    outline: 0;
+}
+
+.login-pf-page .control-label {
+    font-size: 14px;
+    font-weight: 400;
+}
+
+.login-pf-page .login-pf-header h1 {
+    display: none;
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/favicon.ico b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/favicon.ico
new file mode 100644
index 0000000..2bb416d
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/favicon.ico
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-arrow-down.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-arrow-down.png
new file mode 100644
index 0000000..6f2d9d2
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-arrow-down.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-sign.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-sign.png
new file mode 100644
index 0000000..0dd5004
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-error-sign.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-arrow-down.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-arrow-down.png
new file mode 100644
index 0000000..03cc0c4
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-arrow-down.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-sign.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-sign.png
new file mode 100644
index 0000000..640bd71
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-success-sign.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-arrow-down.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-arrow-down.png
new file mode 100644
index 0000000..6f2d9d2
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-arrow-down.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-sign.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-sign.png
new file mode 100644
index 0000000..f9392a3
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/feedback-warning-sign.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/keycloak-logo.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/keycloak-logo.png
new file mode 100644
index 0000000..ffa5b0b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/keycloak-logo.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-background.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-background.png
new file mode 100644
index 0000000..497fd08
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-background.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-background1.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-background1.png
new file mode 100644
index 0000000..5c609c3
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-background1.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-icons.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-icons.png
new file mode 100644
index 0000000..f1a018b
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/login-icons.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/logo.png b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/logo.png
new file mode 100644
index 0000000..f6d7806
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/resources/img/logo.png
Binary files differ
diff --git a/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/theme.properties b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/theme.properties
new file mode 100644
index 0000000..5ff84ad
--- /dev/null
+++ b/infrastructure-provisioning/terraform/keycloak-theme/dlab/login/theme.properties
@@ -0,0 +1,72 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+parent=base
+import=common/keycloak
+styles=node_modules/patternfly/dist/css/patternfly.css node_modules/patternfly/dist/css/patternfly-additions.css lib/zocial/zocial.css css/login.css
+meta=viewport==width=device-width,initial-scale=1
+kcHtmlClass=login-pf
+kcLoginClass=login-pf-page
+kcLogoLink=http://www.keycloak.org
+kcLogoClass=login-pf-brand
+kcContainerClass=container-fluid
+kcContentClass=col-sm-8 col-sm-offset-2 col-md-6 col-md-offset-3 col-lg-6 col-lg-offset-3
+kcContentWrapperClass=row
+kcHeaderClass=login-pf-page-header
+kcFeedbackAreaClass=col-md-12
+kcLocaleClass=col-xs-12 col-sm-1
+kcAlertIconClasserror=pficon pficon-error-circle-o
+kcFormAreaClass=col-sm-10 col-sm-offset-1 col-md-8 col-md-offset-2 col-lg-8 col-lg-offset-2
+kcFormCardClass=card-pf
+kcFormCardAccountClass=login-pf-accounts
+kcFormSocialAccountClass=login-pf-social-section
+kcFormSocialAccountContentClass=col-xs-12 col-sm-6
+kcFormSocialAccountListClass=login-pf-social list-unstyled login-pf-social-all
+kcFormSocialAccountDoubleListClass=login-pf-social-double-col
+kcFormSocialAccountListLinkClass=login-pf-social-link
+kcFormHeaderClass=login-pf-header
+kcFeedbackErrorIcon=pficon pficon-error-circle-o
+kcFeedbackWarningIcon=pficon pficon-warning-triangle-o
+kcFeedbackSuccessIcon=pficon pficon-ok
+kcFeedbackInfoIcon=pficon pficon-info
+kcFormClass=form-horizontal
+kcFormGroupClass=form-group
+kcFormGroupErrorClass=has-error
+kcLabelClass=control-label
+kcLabelWrapperClass=col-xs-12 col-sm-12 col-md-12 col-lg-12
+kcInputClass=form-control
+kcInputWrapperClass=col-xs-12 col-sm-12 col-md-12 col-lg-12
+kcFormOptionsClass=col-xs-12 col-sm-12 col-md-12 col-lg-12
+kcFormButtonsClass=col-xs-12 col-sm-12 col-md-12 col-lg-12
+kcFormSettingClass=login-pf-settings
+kcTextareaClass=form-control
+kcSignUpClass=login-pf-signup
+kcInfoAreaClass=col-xs-12 col-sm-4 col-md-4 col-lg-5 details
+##### css classes for form buttons
+# main class used for all buttons
+kcButtonClass=btn
+# classes defining priority of the button - primary or default (there is typically only one priority button for the form)
+kcButtonPrimaryClass=btn-primary
+kcButtonDefaultClass=btn-default
+# classes defining size of the button
+kcButtonLargeClass=btn-lg
+kcButtonBlockClass=btn-block
+##### css classes for input
+kcInputLargeClass=input-lg
+##### css classes for form accessibility
+kcSrOnlyClass=sr-only
diff --git a/integration-tests-cucumber/pom.xml b/integration-tests-cucumber/pom.xml
deleted file mode 100644
index d862bcb..0000000
--- a/integration-tests-cucumber/pom.xml
+++ /dev/null
@@ -1,133 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~   http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing,
-  ~ software distributed under the License is distributed on an
-  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  ~ KIND, either express or implied.  See the License for the
-  ~ specific language governing permissions and limitations
-  ~ under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <groupId>com.epam.dlab</groupId>
-    <artifactId>integration-tests</artifactId>
-    <version>1.0.0-SNAPSHOT</version>
-    <packaging>jar</packaging>
-
-    <properties>
-        <cucumber.version>4.2.6</cucumber.version>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    </properties>
-
-    <dependencies>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-            <version>2.9.9</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-            <version>2.9.9</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>2.9.9</version>
-        </dependency>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <version>1.18.8</version>
-        </dependency>
-        <dependency>
-            <groupId>org.mongodb</groupId>
-            <artifactId>mongo-java-driver</artifactId>
-            <version>3.10.2</version>
-        </dependency>
-
-
-        <dependency>
-            <groupId>io.cucumber</groupId>
-            <artifactId>cucumber-java</artifactId>
-            <version>${cucumber.version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>io.cucumber</groupId>
-            <artifactId>cucumber-junit</artifactId>
-            <version>${cucumber.version}</version>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.12</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.jayway.restassured</groupId>
-            <artifactId>rest-assured</artifactId>
-            <version>2.9.0</version>
-            <scope>test</scope>
-        </dependency>
-
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <configuration>
-                    <testFailureIgnore>true</testFailureIgnore>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.7.0</version>
-                <configuration>
-                    <encoding>UTF-8</encoding>
-                    <source>1.8</source>
-                    <target>1.8</target>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>net.masterthought</groupId>
-                <artifactId>maven-cucumber-reporting</artifactId>
-                <version>2.8.0</version>
-                <executions>
-                    <execution>
-                        <id>execution</id>
-                        <phase>verify</phase>
-                        <goals>
-                            <goal>generate</goal>
-                        </goals>
-                        <configuration>
-                            <projectName>CucumberWebGui</projectName>
-                            <outputDirectory>${project.build.directory}/cucumber-report-html</outputDirectory>
-                            <cucumberOutput>${project.build.directory}/cucumber.json</cucumberOutput>
-                            <skippedFails>true</skippedFails>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
diff --git a/integration-tests-cucumber/src/main/java/org/apache/dlab/dto/EndpointDTO.java b/integration-tests-cucumber/src/main/java/org/apache/dlab/dto/EndpointDTO.java
deleted file mode 100644
index 7cfdad2..0000000
--- a/integration-tests-cucumber/src/main/java/org/apache/dlab/dto/EndpointDTO.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.dlab.dto;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import lombok.AllArgsConstructor;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-@Data
-@JsonIgnoreProperties(ignoreUnknown = true)
-@AllArgsConstructor
-@NoArgsConstructor
-public class EndpointDTO {
-	private String name;
-	private String url;
-	private String account;
-	@JsonProperty("endpoint_tag")
-	private String tag;
-}
diff --git a/integration-tests-cucumber/src/main/java/org/apache/dlab/mongo/MongoDBHelper.java b/integration-tests-cucumber/src/main/java/org/apache/dlab/mongo/MongoDBHelper.java
deleted file mode 100644
index 4903fd4..0000000
--- a/integration-tests-cucumber/src/main/java/org/apache/dlab/mongo/MongoDBHelper.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.dlab.mongo;
-
-import com.mongodb.client.MongoClient;
-import com.mongodb.client.MongoClients;
-import org.apache.dlab.util.PropertyHelper;
-
-public class MongoDBHelper {
-	private static final MongoClient client = MongoClients
-			.create(PropertyHelper.read("mongo.connection.string"));
-
-	public static void cleanCollection(String collection) {
-		client.getDatabase(PropertyHelper.read("mongo.db.name")).getCollection(collection).drop();
-	}
-}
diff --git a/integration-tests-cucumber/src/main/java/org/apache/dlab/util/JacksonMapper.java b/integration-tests-cucumber/src/main/java/org/apache/dlab/util/JacksonMapper.java
deleted file mode 100644
index ae4d5ce..0000000
--- a/integration-tests-cucumber/src/main/java/org/apache/dlab/util/JacksonMapper.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.dlab.util;
-
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-public final class JacksonMapper {
-	private static final ObjectMapper MAPPER = new ObjectMapper();
-
-	public static <T> String marshall(T obj) {
-		try {
-			return MAPPER.writeValueAsString(obj);
-		} catch (JsonProcessingException e) {
-			throw new IllegalArgumentException(e);
-		}
-	}
-}
diff --git a/integration-tests-cucumber/src/main/java/org/apache/dlab/util/PropertyHelper.java b/integration-tests-cucumber/src/main/java/org/apache/dlab/util/PropertyHelper.java
deleted file mode 100644
index 71688e2..0000000
--- a/integration-tests-cucumber/src/main/java/org/apache/dlab/util/PropertyHelper.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.dlab.util;
-
-import java.io.FileInputStream;
-import java.io.InputStream;
-import java.util.Properties;
-
-public class PropertyHelper {
-
-	private static final Properties PROPERTIES;
-
-	static {
-		PROPERTIES = new Properties();
-		try (InputStream inputStream = new FileInputStream(System.getProperty("config.file"))) {
-			PROPERTIES.load(inputStream);
-		} catch (Exception e) {
-			e.printStackTrace();
-		}
-	}
-
-	public static String read(String prop) {
-		return PROPERTIES.getProperty(prop);
-	}
-}
diff --git a/integration-tests-cucumber/src/test/java/dlab/Constants.java b/integration-tests-cucumber/src/test/java/dlab/Constants.java
deleted file mode 100644
index 8e1b6b9..0000000
--- a/integration-tests-cucumber/src/test/java/dlab/Constants.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package dlab;
-
-public interface Constants {
-	String API_URI = "https://localhost:8443/api/";
-}
diff --git a/integration-tests-cucumber/src/test/java/dlab/RunCucumberTest.java b/integration-tests-cucumber/src/test/java/dlab/RunCucumberTest.java
deleted file mode 100644
index fb03b55..0000000
--- a/integration-tests-cucumber/src/test/java/dlab/RunCucumberTest.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package dlab;
-
-import cucumber.api.CucumberOptions;
-import cucumber.api.junit.Cucumber;
-import org.junit.runner.RunWith;
-
-@RunWith(Cucumber.class)
-@CucumberOptions(plugin = {"json:target/cucumber.json"})
-public class RunCucumberTest {
-}
diff --git a/integration-tests-cucumber/src/test/java/dlab/endpoint/EndpointSteps.java b/integration-tests-cucumber/src/test/java/dlab/endpoint/EndpointSteps.java
deleted file mode 100644
index 1c1d43a..0000000
--- a/integration-tests-cucumber/src/test/java/dlab/endpoint/EndpointSteps.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package dlab.endpoint;
-
-import com.jayway.restassured.http.ContentType;
-import com.jayway.restassured.response.Response;
-import com.jayway.restassured.specification.RequestSpecification;
-import cucumber.api.java.en.And;
-import cucumber.api.java.en.Given;
-import cucumber.api.java.en.Then;
-import cucumber.api.java.en.When;
-import org.apache.dlab.dto.EndpointDTO;
-import org.apache.dlab.mongo.MongoDBHelper;
-import org.apache.dlab.util.JacksonMapper;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import static com.jayway.restassured.RestAssured.given;
-import static dlab.Constants.API_URI;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.core.IsEqual.equalTo;
-
-public class EndpointSteps {
-	private RequestSpecification request;
-	private Response response;
-	private String name;
-
-	@Given("User try to create new endpoint with name {string} and uri {string} and account {string} and {string}")
-	public void userTryToCreateNewEndpoint(String name, String uri, String account, String tag) {
-		this.name = name;
-		request = given().body(JacksonMapper.marshall(new EndpointDTO(name, uri, account, tag)))
-				.auth()
-				.oauth2("token123")
-				.contentType(ContentType.JSON);
-
-	}
-
-	@When("User send create new endpoint request")
-	public void userSendCreateNewEndpoint() {
-		response = request.post(API_URI + "endpoint");
-	}
-
-	@Given("There is no endpoints in DLab")
-	public void thereIsNoEndpointsInDLab() {
-		MongoDBHelper.cleanCollection("endpoints");
-
-	}
-
-	@Then("Response status code is {int}")
-	public void responseStatusCodeIs(int code) {
-		assertThat(response.getStatusCode(), equalTo(code));
-	}
-
-	@And("Endpoint URI is present in location header")
-	public void endpointURIIsPresentInLocationHeader() {
-		assertThat(response.getHeader("Location"), equalTo(API_URI + "endpoint/" + name));
-	}
-
-	@When("User try to get information about endpoint with name {string}")
-	public void userTryToGetInformationAboutEndpointWithName(String endpoint) throws URISyntaxException {
-		response = authenticatedRequest()
-				.get(new URI(API_URI + "endpoint/" + endpoint));
-
-	}
-
-	@And("Endpoint information is successfully returned with " +
-			"name {string}, uri {string}, account {string}, and tag {string}")
-	public void endpointInformationIsSuccessfullyReturnedWithNameUriAccountAndTag(String name, String uri,
-																				  String account, String tag) {
-		final EndpointDTO dto = response.getBody().as(EndpointDTO.class);
-		assertThat(dto.getAccount(), equalTo(account));
-		assertThat(dto.getName(), equalTo(name));
-		assertThat(dto.getUrl(), equalTo(uri));
-		assertThat(dto.getTag(), equalTo(tag));
-
-	}
-
-	@When("User try to get information about endpoints")
-	public void userTryToGetInformationAboutEndpoints() throws URISyntaxException {
-		response = authenticatedRequest()
-				.get(new URI(API_URI + "endpoint"));
-
-	}
-
-	@And("There are endpoints with name test1 and test2")
-	public void thereAreEndpointsWithNameTestAndTest() {
-		final EndpointDTO[] endpoints = response.getBody().as(EndpointDTO[].class);
-		assertThat(endpoints.length, equalTo(2));
-		assertThat(endpoints[0].getName(), equalTo("test1"));
-		assertThat(endpoints[1].getName(), equalTo("test2"));
-	}
-
-	private RequestSpecification authenticatedRequest() {
-		return given()
-				.auth()
-				.oauth2("token123");
-	}
-}
diff --git a/integration-tests-cucumber/src/test/java/dlab/login/LoginSteps.java b/integration-tests-cucumber/src/test/java/dlab/login/LoginSteps.java
deleted file mode 100644
index fd533e0..0000000
--- a/integration-tests-cucumber/src/test/java/dlab/login/LoginSteps.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package dlab.login;
-
-import com.jayway.restassured.http.ContentType;
-import com.jayway.restassured.response.Response;
-import com.jayway.restassured.specification.RequestSpecification;
-import cucumber.api.java.en.Given;
-import cucumber.api.java.en.Then;
-import cucumber.api.java.en.When;
-import gherkin.deps.com.google.gson.JsonObject;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import static com.jayway.restassured.RestAssured.given;
-import static dlab.Constants.API_URI;
-import static org.hamcrest.core.IsEqual.equalTo;
-import static org.junit.Assert.assertThat;
-
-public class LoginSteps {
-
-
-	private static final String LOGIN_RESOURCE_PATH = API_URI + "user/login";
-	private RequestSpecification request;
-	private Response response;
-
-	@Given("User try to login to Dlab with {string} and {string}")
-	public void userProvidedLoginAndPassword(String username, String password) {
-		JsonObject jsonObject = new JsonObject();
-		jsonObject.addProperty("username", username);
-		jsonObject.addProperty("password", password);
-		request = given().body(jsonObject.toString()).contentType(ContentType.JSON);
-	}
-
-	@When("user try to login")
-	public void userTryToLogin() throws URISyntaxException {
-		response = request.post(new URI(LOGIN_RESOURCE_PATH));
-	}
-
-	@Then("response code is {string}")
-	public void responseCodeIs(String status) {
-		assertThat(response.getStatusCode(), equalTo(Integer.valueOf(status)));
-
-	}
-}
diff --git a/integration-tests-cucumber/src/test/resources/dlab/endpoint.feature b/integration-tests-cucumber/src/test/resources/dlab/endpoint.feature
deleted file mode 100644
index 1f7fe14..0000000
--- a/integration-tests-cucumber/src/test/resources/dlab/endpoint.feature
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-Feature: Endpoint management in DLab
-  This feature allows managing endpoints inside DLab
-
-  Scenario Outline: Create new endpoint when it does not exist
-
-    Given There is no endpoints in DLab
-    And User try to create new endpoint with name "<name>" and uri "<uri>" and account "<account>" and "<tag>"
-    When User send create new endpoint request
-    Then Response status code is 200
-    And Endpoint URI is present in location header
-    Examples:
-      | name          | uri     | account   | tag      |
-      | test_endpoint | someuri | 123231312 | some_tag |
-
-
-  Scenario Outline: Create new endpoint when it already exists
-
-    Given There is no endpoints in DLab
-    And User try to create new endpoint with name "<name>" and uri "<uri>" and account "<account>" and "<tag>"
-    And User send create new endpoint request
-    When User try to create new endpoint with name "<name>" and uri "<uri>" and account "<account>" and "<tag>"
-    And User send create new endpoint request
-    Then Response status code is 409
-    Examples:
-      | name          | uri     | account   | tag      |
-      | test_endpoint | someuri | 123231312 | some_tag |
-
-
-  Scenario Outline: Get information for endpoint
-
-    Given There is no endpoints in DLab
-    And User try to create new endpoint with name "<name>" and uri "<uri>" and account "<account>" and "<tag>"
-    And User send create new endpoint request
-    When User try to get information about endpoint with name "<name>"
-    Then Response status code is 200
-    And Endpoint information is successfully returned with name "<name>", uri "<uri>", account "<account>", and tag "<tag>"
-    Examples:
-      | name          | uri     | account   | tag      |
-      | test_endpoint | someuri | 123231312 | some_tag |
-
-
-  Scenario: Get list of endpoints
-
-    Given There is no endpoints in DLab
-    And User try to create new endpoint with name "test1" and uri "someuri1" and account "123" and "customTag1"
-    And User send create new endpoint request
-    And User try to create new endpoint with name "test2" and uri "someuri2" and account "1233" and "customTag4"
-    And User send create new endpoint request
-    When User try to get information about endpoints
-    Then Response status code is 200
-    And There are endpoints with name test1 and test2
-
-  Scenario: Get endpoint that does not exist
-
-    Given There is no endpoints in DLab
-    When User try to get information about endpoint with name "someName"
-    Then Response status code is 404
diff --git a/integration-tests-cucumber/src/test/resources/dlab/login.feature b/integration-tests-cucumber/src/test/resources/dlab/login.feature
deleted file mode 100644
index 1675aad..0000000
--- a/integration-tests-cucumber/src/test/resources/dlab/login.feature
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-Feature: DLab login API
-  Used to check the DLab login flow
-
-  Scenario Outline: User tries to log in to DLab
-    Given User try to login to Dlab with "<username>" and "<password>"
-    When user try to login
-    Then response code is "<status>"
-
-    Examples:
-      | username       | password | status |
-      | test           | pass     | 200    |
-      | not_valid_user | pass     | 401    |
\ No newline at end of file
diff --git a/integration-tests/examples/config.properties b/integration-tests/examples/config.properties
deleted file mode 100644
index 4ee463e..0000000
--- a/integration-tests/examples/config.properties
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-JENKINS_USERNAME=
-JENKINS_PASSWORD=
-USERNAME=
-PASSWORD=
-NOT_IAM_USERNAME=
-NOT_IAM_PASSWORD=
-NOT_DLAB_USERNAME=
-NOT_DLAB_PASSWORD=
-JENKINS_JOB_URL=
-USER_FOR_ACTIVATE_KEY=
-PASSWORD_FOR_ACTIVATE_KEY=
-
-ACCESS_KEY_PRIV_FILE_NAME=
-ACCESS_KEY_PUB_FILE_NAME=
-
-AWS_ACCESS_KEY_ID=
-AWS_SECRET_ACCESS_KEY=
-AWS_REGION=
-AWS_REQUEST_TIMEOUT=10s
-
-TIMEOUT_JENKINS_AUTOTEST=20m
-TIMEOUT_UPLOAD_KEY=40m
-TIMEOUT_SSN_STARTUP=60m
-
-CLUSTER_OS_USERNAME=dlab-user
-CLUSTER_OS_FAMILY=debian
-
-#NOTEBOOKS_TO_TEST=\
-#		[\
-#			{\
-#				"notebook_template": "jupyter",\
-#				"data_engine_type": "dataengine",\
-#				"full_test": false,\
-#				"timeout_notebook_create": "60m",\
-#				"timeout_notebook_startup": "20m",\
-#				"timeout_notebook_shutdown": "10m",\
-#				"timeout_cluster_create": "60m",\
-#				"timeout_cluster_terminate": "20m",\
-#				"timeout_lib_groups": "5m",\
-#				"timeout_lib_list": "5m",\
-#				"timeout_lib_install": "15m"\
-#			},\
-#			{\
-#				"notebook_template": "rstudio",\
-#				"data_engine_type": "dataengine-service"\
-#			},\
-#                       {\
-#                               "notebook_template": "zeppelin",\
-#                               "data_engine_type": "dataengine-service"\
-#                       }\
-#		]
-
-NOTEBOOKS_TO_TEST=[\
-                        {\
-                                "notebook_template":"jupyter",\
-                                "data_engine_type":"dataengine",\
-                                "full_test":true\
-                        },\
-                        {\
-                                "notebook_template":"jupyter",\
-                                "data_engine_type":"dataengine-service"\
-                        }\
-                ]
-JUPYTER_SCENARIO_FILES=
-S3_TESTS_TEMPLATE_BUCKET_NAME=
-
-#RUN_MODE_LOCAL=true
-#USE_JENKINS=false
-#SSN_URL=https://localhost:8443
-#SERVICE_BASE_NAME=
diff --git a/integration-tests/examples/copy_files.py b/integration-tests/examples/copy_files.py
deleted file mode 100644
index 3ecc17d..0000000
--- a/integration-tests/examples/copy_files.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/python
-
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-import os, sys, json
-import argparse
-from fabric.api import *
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--storage', type=str, default='S3/GCP buckets, Azure Blob container / Datalake folder')
-parser.add_argument('--notebook', type=str, default='aws, azure, gcp')
-parser.add_argument('--cloud', type=str, default='aws, azure, gcp')
-parser.add_argument('--azure_storage_account', type=str, default='')
-parser.add_argument('--azure_datalake_account', type=str, default='')
-args = parser.parse_args()
-
-dataset_file = ['airports.csv', 'carriers.csv', '2008.csv.bz2']
-
-def download_dataset():
-    try:
-        for f in dataset_file:
-            local('wget http://stat-computing.org/dataexpo/2009/{0} -O /tmp/{0}'.format(f))
-    except Exception as err:
-        print('Failed to download test dataset', str(err))
-        sys.exit(1)
-
-def upload_aws():
-    try:
-        for f in dataset_file:
-            local('aws s3 cp /tmp/{0} s3://{1}/{2}_dataset/ --sse AES256'.format(f, args.storage, args.notebook))
-    except Exception as err:
-        print('Failed to upload test dataset to bucket', str(err))
-        sys.exit(1)
-
-def upload_azure_datalake():
-    try:
-        from azure.datalake.store import core, lib, multithread
-        sp_creds = json.loads(open(os.environ['AZURE_AUTH_LOCATION']).read())
-        dl_filesystem_creds = lib.auth(tenant_id=json.dumps(sp_creds['tenantId']).replace('"', ''),
-                                       client_secret=json.dumps(sp_creds['clientSecret']).replace('"', ''),
-                                       client_id=json.dumps(sp_creds['clientId']).replace('"', ''),
-                                       resource='https://datalake.azure.net/')
-        datalake_client = core.AzureDLFileSystem(dl_filesystem_creds, store_name=args.azure_datalake_account)
-        for f in dataset_file:
-            multithread.ADLUploader(datalake_client,
-                                    lpath='/tmp/{0}'.format(f),
-                                    rpath='{0}/{1}_dataset/{2}'.format(args.storage, args.notebook, f))
-    except Exception as err:
-        print('Failed to upload test dataset to datalake store', str(err))
-        sys.exit(1)
-
-def upload_azure_blob():
-    try:
-        from azure.mgmt.storage import StorageManagementClient
-        from azure.storage.blob import BlockBlobService
-        from azure.common.client_factory import get_client_from_auth_file
-        storage_client = get_client_from_auth_file(StorageManagementClient)
-        resource_group_name = ''
-        for i in storage_client.storage_accounts.list():
-            if args.storage.replace('container', 'storage') == str(i.tags.get('Name')):
-                resource_group_name = str(i.tags.get('SBN'))
-        secret_key = storage_client.storage_accounts.list_keys(resource_group_name, args.azure_storage_account).keys[0].value
-        block_blob_service = BlockBlobService(account_name=args.azure_storage_account, account_key=secret_key)
-        for f in dataset_file:
-            block_blob_service.create_blob_from_path(args.storage, '{0}_dataset/{1}'.format(args.notebook, f), '/tmp/{0}'.format(f))
-    except Exception as err:
-        print('Failed to upload test dataset to blob storage', str(err))
-        sys.exit(1)
-
-def upload_gcp():
-    try:
-        for f in dataset_file:
-            local('sudo gsutil -m cp /tmp/{0} gs://{1}/{2}_dataset/'.format(f, args.storage, args.notebook))
-    except Exception as err:
-        print('Failed to upload test dataset to bucket', str(err))
-        sys.exit(1)
-
-if __name__ == "__main__":
-    download_dataset()
-    if args.cloud == 'aws':
-        upload_aws()
-    elif args.cloud == 'azure':
-        os.environ['AZURE_AUTH_LOCATION'] = '/home/dlab-user/keys/azure_auth.json'
-        if args.azure_datalake_account:
-            upload_azure_datalake()
-        else:
-            upload_azure_blob()
-    elif args.cloud == 'gcp':
-        upload_gcp()
-    else:
-        print('Error! Unknown cloud provider.')
-        sys.exit(1)
-
-    sys.exit(0)
diff --git a/integration-tests/examples/scenario_deeplearning/deeplearning_tests.py b/integration-tests/examples/scenario_deeplearning/deeplearning_tests.py
deleted file mode 100644
index c4873ad..0000000
--- a/integration-tests/examples/scenario_deeplearning/deeplearning_tests.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/python
-
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-import os, sys, json
-from fabric.api import *
-import argparse
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--storage', type=str, default='')
-parser.add_argument('--cloud', type=str, default='')
-parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--cluster_name', type=str, default='')
-parser.add_argument('--azure_storage_account', type=str, default='')
-parser.add_argument('--azure_datalake_account', type=str, default='')
-args = parser.parse_args()
-
-
-def prepare_templates():
-    try:
-        local('/bin/bash -c "source /etc/profile && wget http://files.fast.ai/data/dogscats.zip -O /tmp/dogscats.zip"')
-        local('unzip -q /tmp/dogscats.zip -d /tmp')
-        local('/bin/bash -c "mkdir -p /home/{0}/{1}"'.format(args.os_user, "{test,train}"))
-        local('mv /tmp/dogscats/test1/* /home/{0}/test'.format(args.os_user))
-        local('/bin/bash -c "mv /tmp/dogscats/valid/{0}/* /home/{1}/train"'.format("{cats,dogs}", args.os_user))
-        local('/bin/bash -c "mv /tmp/dogscats/train/{0}/* /home/{1}/train"'.format("{cats,dogs}", args.os_user))
-    except Exception as err:
-        print('Failed to download/unpack image dataset!', str(err))
-        sys.exit(1)
-    local('mkdir -p /home/{0}/logs'.format(args.os_user))
-    local('mv /tmp/deeplearning /home/{0}/test_templates'.format(args.os_user))
-
-def get_storage():
-    storages = {"aws": args.storage,
-                "azure": "{0}@{1}.blob.core.windows.net".format(args.storage, args.azure_storage_account),
-                "gcp": args.storage}
-    protocols = {"aws": "s3a", "azure": "wasbs", "gcp": "gs"}
-    if args.azure_datalake_account:
-        storages['azure'] = "{0}.azuredatalakestore.net/{1}".format(args.azure_datalake_account, args.storage)
-        protocols['azure'] = 'adl'
-    return (storages[args.cloud], protocols[args.cloud])
-
-def prepare_ipynb(kernel_name, template_path, ipynb_name):
-    with open(template_path, 'r') as f:
-        text = f.read()
-    text = text.replace('KERNEL_NAME', kernel_name)
-    with open('/home/{}/{}.ipynb'.format(args.os_user, ipynb_name), 'w') as f:
-        f.write(text)
-
-def run_ipynb(ipynb_name):
-    local('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64:/usr/lib64/openmpi/lib; ' \
-            'jupyter nbconvert --ExecutePreprocessor.timeout=-1 --ExecutePreprocessor.startup_timeout=300 --execute /home/{}/{}.ipynb'.format(args.os_user, ipynb_name))
-
-def run_tensor():
-    interpreters = ['pyspark_local']
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_preparation_tensor.ipynb'.format(args.os_user), 'preparation_tensor')
-        run_ipynb('preparation_tensor')
-        prepare_ipynb(i, '/home/{}/test_templates/template_visualization_tensor.ipynb'.format(args.os_user), 'visualization_tensor')
-        run_ipynb('visualization_tensor')
-
-def run_cntk():
-    interpreters = ['pyspark_local']
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_cntk.ipynb'.format(args.os_user), 'test_cntk')
-        run_ipynb('test_cntk')
-
-def run_keras():
-    interpreters = ['pyspark_local']
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_keras.ipynb'.format(args.os_user), 'test_keras')
-        run_ipynb('test_keras')
-
-def run_mxnet():
-    interpreters = ['pyspark_local']
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_mxnet.ipynb'.format(args.os_user), 'test_mxnet')
-        run_ipynb('test_mxnet')
-
-def run_theano():
-    interpreters = ['pyspark_local']
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_theano.ipynb'.format(args.os_user), 'test_theano')
-        run_ipynb('test_theano')
-
-def run_torch():
-    interpreters = ['itorch']
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_torch.ipynb'.format(args.os_user), 'test_torch')
-        run_ipynb('test_torch')
-
-
-if __name__ == "__main__":
-    try:
-        prepare_templates()
-        run_tensor()
-        run_cntk()
-        run_keras()
-        run_mxnet()
-        run_theano()
-        run_torch()
-    except Exception as err:
-        print('Error!', str(err))
-        sys.exit(1)
-
-    sys.exit(0)
\ No newline at end of file
diff --git a/integration-tests/examples/scenario_jupyter/jupyter_tests.py b/integration-tests/examples/scenario_jupyter/jupyter_tests.py
deleted file mode 100644
index 018e678..0000000
--- a/integration-tests/examples/scenario_jupyter/jupyter_tests.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/python
-
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-import os, sys, json
-from fabric.api import *
-import argparse
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--storage', type=str, default='')
-parser.add_argument('--cloud', type=str, default='')
-parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--cluster_name', type=str, default='')
-parser.add_argument('--azure_storage_account', type=str, default='')
-parser.add_argument('--azure_datalake_account', type=str, default='')
-args = parser.parse_args()
-
-
-def prepare_templates():
-    local('mv /tmp/jupyter /home/{0}/test_templates'.format(args.os_user))
-
-def get_storage():
-    storages = {"aws": args.storage,
-                "azure": "{0}@{1}.blob.core.windows.net".format(args.storage, args.azure_storage_account),
-                "gcp": args.storage}
-    protocols = {"aws": "s3a", "azure": "wasbs", "gcp": "gs"}
-    if args.azure_datalake_account:
-        storages['azure'] = "{0}.azuredatalakestore.net/{1}".format(args.azure_datalake_account, args.storage)
-        protocols['azure'] = 'adl'
-    return (storages[args.cloud], protocols[args.cloud])
-
-def prepare_ipynb(kernel_name, template_path, ipynb_name):
-    with open(template_path, 'r') as f:
-        text = f.read()
-    text = text.replace('WORKING_STORAGE', get_storage()[0])
-    text = text.replace('PROTOCOL_NAME', get_storage()[1])
-    text = text.replace('KERNEL_NAME', kernel_name)
-    with open('/home/{}/{}.ipynb'.format(args.os_user, ipynb_name), 'w') as f:
-        f.write(text)
-
-def run_ipynb(ipynb_name):
-    local('jupyter nbconvert --ExecutePreprocessor.timeout=-1 --ExecutePreprocessor.startup_timeout=300 --execute /home/{}/{}.ipynb'.format(args.os_user, ipynb_name))
-
-def run_pyspark():
-    interpreters = ['pyspark_local', 'pyspark_' + args.cluster_name]
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_preparation_pyspark.ipynb'.format(args.os_user),
-                      'preparation_pyspark')
-        run_ipynb('preparation_pyspark')
-        prepare_ipynb(i, '/home/{}/test_templates/template_visualization_pyspark.ipynb'.format(args.os_user),
-                      'visualization_pyspark')
-        run_ipynb('visualization_pyspark')
-
-def run_spark():
-    interpreters = ['apache_toree_scala', 'toree_' + args.cluster_name]
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_preparation_spark.ipynb'.format(args.os_user),
-                      'preparation_spark')
-        run_ipynb('preparation_spark')
-
-def run_sparkr():
-    interpreters = ['ir', 'r_' + args.cluster_name]
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_preparation_sparkr.ipynb'.format(args.os_user),
-                      'preparation_sparkr')
-        run_ipynb('preparation_sparkr')
-        prepare_ipynb(i, '/home/{}/test_templates/template_visualization_sparkr.ipynb'.format(args.os_user),
-                      'visualization_sparkr')
-        run_ipynb('visualization_sparkr')
-
-
-if __name__ == "__main__":
-    try:
-        prepare_templates()
-        run_pyspark()
-        run_spark()
-        run_sparkr()
-    except Exception as err:
-        print('Error!', str(err))
-        sys.exit(1)
-
-    sys.exit(0)
\ No newline at end of file
diff --git a/integration-tests/examples/scenario_rstudio/rstudio_tests.py b/integration-tests/examples/scenario_rstudio/rstudio_tests.py
deleted file mode 100644
index d9a1540..0000000
--- a/integration-tests/examples/scenario_rstudio/rstudio_tests.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/python
-
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-import os, sys, json
-from fabric.api import *
-import argparse
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--storage', type=str, default='')
-parser.add_argument('--cloud', type=str, default='')
-parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--cluster_name', type=str, default='')
-parser.add_argument('--azure_storage_account', type=str, default='')
-parser.add_argument('--azure_datalake_account', type=str, default='')
-args = parser.parse_args()
-
-
-def prepare_templates():
-    local('mv /tmp/rstudio /home/{0}/test_templates'.format(args.os_user))
-
-def get_storage():
-    storages = {"aws": args.storage,
-                "azure": "{0}@{1}.blob.core.windows.net".format(args.storage, args.azure_storage_account),
-                "gcp": args.storage}
-    protocols = {"aws": "s3a", "azure": "wasbs", "gcp": "gs"}
-    if args.azure_datalake_account:
-        storages['azure'] = "{0}.azuredatalakestore.net/{1}".format(args.azure_datalake_account, args.storage)
-        protocols['azure'] = 'adl'
-    return (storages[args.cloud], protocols[args.cloud])
-
-def prepare_rscript(template_path, rscript_name, kernel='remote'):
-    with open(template_path, 'r') as f:
-        text = f.read()
-    text = text.replace('WORKING_STORAGE', get_storage()[0])
-    text = text.replace('PROTOCOL_NAME', get_storage()[1])
-    if kernel == 'remote':
-        if '-de-' in args.cluster_name:
-            text = text.replace('MASTER', 'master')
-        elif '-des-' in args.cluster_name:
-            text = text.replace('MASTER', 'master = "yarn"')
-    elif kernel == 'local':
-        text = text.replace('MASTER', 'master = "local[*]"')
-    with open('/home/{}/{}.r'.format(args.os_user, rscript_name), 'w') as f:
-        f.write(text)
-
-def enable_local_kernel():
-    local("sed -i 's/^master/#master/' /home/{0}/.Rprofile".format(args.os_user))
-    local('''sed -i "s/^/#/g" /home/{0}/.Renviron'''.format(args.os_user))
-    local('''sed -i "/\/opt\/spark\//s/#//g" /home/{0}/.Renviron'''.format(args.os_user))
-    local('rm -f metastore_db/db* derby.log')
-
-def run_rscript(rscript_name):
-    local('R < /home/{0}/{1}.r --no-save'.format(args.os_user, rscript_name))
-
-
-if __name__ == "__main__":
-    try:
-        prepare_templates()
-        # Running on remote kernel
-        prepare_rscript('/home/{}/test_templates/template_preparation.r'.format(args.os_user), 'preparation', 'remote')
-        run_rscript('preparation')
-        prepare_rscript('/home/{}/test_templates/template_visualization.r'.format(args.os_user), 'visualization', 'remote')
-        run_rscript('visualization')
-        # Running on local kernel
-        enable_local_kernel()
-        prepare_rscript('/home/{}/test_templates/template_preparation.r'.format(args.os_user), 'preparation', 'local')
-        prepare_rscript('/home/{}/test_templates/template_visualization.r'.format(args.os_user), 'visualization', 'local')
-        run_rscript('preparation')
-        run_rscript('visualization')
-    except Exception as err:
-        print('Error!', str(err))
-        sys.exit(1)
-
-    sys.exit(0)
diff --git a/integration-tests/examples/scenario_tensor/tensor_tests.py b/integration-tests/examples/scenario_tensor/tensor_tests.py
deleted file mode 100644
index 3bbb93b..0000000
--- a/integration-tests/examples/scenario_tensor/tensor_tests.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/python
-
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-import os, sys, json
-from fabric.api import *
-import argparse
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--storage', type=str, default='')
-parser.add_argument('--cloud', type=str, default='')
-parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--cluster_name', type=str, default='')
-parser.add_argument('--azure_storage_account', type=str, default='')
-parser.add_argument('--azure_datalake_account', type=str, default='')
-args = parser.parse_args()
-
-
-def prepare_templates():
-    try:
-        local('/bin/bash -c "source /etc/profile && wget http://files.fast.ai/data/dogscats.zip -O /tmp/dogscats.zip"')
-        local('unzip -q /tmp/dogscats.zip -d /tmp')
-        local('/bin/bash -c "mkdir -p /home/{0}/{1}"'.format(args.os_user, "{test,train}"))
-        local('mv /tmp/dogscats/test1/* /home/{0}/test'.format(args.os_user))
-        local('/bin/bash -c "mv /tmp/dogscats/valid/{0}/* /home/{1}/train"'.format("{cats,dogs}", args.os_user))
-        local('/bin/bash -c "mv /tmp/dogscats/train/{0}/* /home/{1}/train"'.format("{cats,dogs}", args.os_user))
-    except Exception as err:
-        print('Failed to download/unpack image dataset!', str(err))
-        sys.exit(1)
-    local('mkdir -p /home/{0}/logs'.format(args.os_user))
-    local('mv /tmp/tensor /home/{0}/test_templates'.format(args.os_user))
-
-def get_storage():
-    storages = {"aws": args.storage,
-                "azure": "{0}@{1}.blob.core.windows.net".format(args.storage, args.azure_storage_account),
-                "gcp": args.storage}
-    protocols = {"aws": "s3a", "azure": "wasbs", "gcp": "gs"}
-    if args.azure_datalake_account:
-        storages['azure'] = "{0}.azuredatalakestore.net/{1}".format(args.azure_datalake_account, args.storage)
-        protocols['azure'] = 'adl'
-    return (storages[args.cloud], protocols[args.cloud])
-
-def prepare_ipynb(kernel_name, template_path, ipynb_name):
-    with open(template_path, 'r') as f:
-        text = f.read()
-    text = text.replace('KERNEL_NAME', kernel_name)
-    with open('/home/{}/{}.ipynb'.format(args.os_user, ipynb_name), 'w') as f:
-        f.write(text)
-
-def run_ipynb(ipynb_name):
-    local('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; ' \
-            'jupyter nbconvert --ExecutePreprocessor.timeout=-1 --ExecutePreprocessor.startup_timeout=300 --execute /home/{}/{}.ipynb'.format(args.os_user, ipynb_name))
-
-def run_tensor():
-    interpreters = ['pyspark_local']
-    for i in interpreters:
-        prepare_ipynb(i, '/home/{}/test_templates/template_preparation_tensor.ipynb'.format(args.os_user), 'preparation_tensor')
-        run_ipynb('preparation_tensor')
-        prepare_ipynb(i, '/home/{}/test_templates/template_visualization_tensor.ipynb'.format(args.os_user), 'visualization_tensor')
-        run_ipynb('visualization_tensor')
-
-
-if __name__ == "__main__":
-    try:
-        prepare_templates()
-        run_tensor()
-    except Exception as err:
-        print('Error!', str(err))
-        sys.exit(1)
-
-    sys.exit(0)
\ No newline at end of file
diff --git a/integration-tests/examples/scenario_zeppelin/zeppelin_tests.py b/integration-tests/examples/scenario_zeppelin/zeppelin_tests.py
deleted file mode 100644
index 1c9f05c..0000000
--- a/integration-tests/examples/scenario_zeppelin/zeppelin_tests.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python
-
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-import os, sys, json
-from fabric.api import *
-import argparse
-import requests
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--storage', type=str, default='')
-parser.add_argument('--cloud', type=str, default='')
-parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--cluster_name', type=str, default='')
-parser.add_argument('--azure_storage_account', type=str, default='')
-parser.add_argument('--azure_datalake_account', type=str, default='')
-args = parser.parse_args()
-
-
-def prepare_templates():
-    local('mv /tmp/zeppelin /home/{0}/test_templates'.format(args.os_user))
-
-def get_storage():
-    storages = {"aws": args.storage,
-                "azure": "{0}@{1}.blob.core.windows.net".format(args.storage, args.azure_storage_account),
-                "gcp": args.storage}
-    protocols = {"aws": "s3a", "azure": "wasbs", "gcp": "gs"}
-    if args.azure_datalake_account:
-        storages['azure'] = "{0}.azuredatalakestore.net/{1}".format(args.azure_datalake_account, args.storage)
-        protocols['azure'] = 'adl'
-    return (storages[args.cloud], protocols[args.cloud])
-
-def get_note_status(note_id, notebook_ip):
-    running = False
-    local('sleep 5')
-    response = requests.get('http://{0}:8080/api/notebook/job/{1}'.format(notebook_ip, note_id))
-    status = json.loads(response.content)
-    for i in status.get('body'):
-        if i.get('status') == "RUNNING" or i.get('status') == "PENDING":
-            print('Notebook status: {}'.format(i.get('status')))
-            running = True
-        elif i.get('status') == "ERROR":
-            print('Error in notebook')
-            sys.exit(1)
-    if running:
-        local('sleep 5')
-        get_note_status(note_id, notebook_ip)
-    else:
-        return "OK"
-
-def import_note(note_path, notebook_ip):
-    headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Expires': '0'}
-    response = requests.post('http://{0}:8080/api/notebook/import'.format(notebook_ip), data=open(note_path, 'rb'), headers=headers)
-    status = json.loads(response.content)
-    if status.get('status') == 'OK':
-        print('Imported notebook: {}'.format(note_path))
-        return status.get('body')
-    else:
-        print('Failed to import notebook')
-        sys.exit(1)
-
-def prepare_note(interpreter_name, template_path, note_name):
-    with open(template_path, 'r') as f:
-        text = f.read()
-    text = text.replace('INTERPRETER_NAME', interpreter_name)
-    text = text.replace('WORKING_STORAGE', get_storage()[0])
-    text = text.replace('PROTOCOL_NAME', get_storage()[1])
-    with open(note_name, 'w') as f:
-        f.write(text)
-
-def run_note(note_id, notebook_ip):
-    response = requests.post('http://{0}:8080/api/notebook/job/{1}'.format(notebook_ip, note_id))
-    status = json.loads(response.content)
-    if status.get('status') == 'OK':
-        get_note_status(note_id, notebook_ip)
-    else:
-        print('Failed to run notebook')
-        sys.exit(1)
-
-def remove_note(note_id, notebook_ip):
-    response = requests.delete('http://{0}:8080/api/notebook/{1}'.format(notebook_ip, note_id))
-    status = json.loads(response.content)
-    if status.get('status') == 'OK':
-        return "OK"
-    else:
-        sys.exit(1)
-
-def restart_interpreter(notebook_ip, interpreter):
-    response = requests.get('http://{0}:8080/api/interpreter/setting'.format(notebook_ip))
-    status = json.loads(response.content)
-    if status.get('status') == 'OK':
-        id = [i['id'] for i in status['body'] if i['name'] in interpreter][0]
-        response = requests.put('http://{0}:8080/api/interpreter/setting/restart/{1}'.format(notebook_ip, id))
-        status = json.loads(response.content)
-        if status.get('status') == 'OK':
-            local('sleep 5')
-            return "OK"
-        else:
-            print('Failed to restart interpreter')
-            sys.exit(1)
-    else:
-        print('Failed to get interpreter settings')
-        sys.exit(1)
-
-def run_pyspark():
-    interpreters = ['local_interpreter_python2.pyspark', args.cluster_name + "_py2.pyspark"]
-    for i in interpreters:
-        prepare_note(i, '/home/{}/test_templates/template_preparation_pyspark.json'.format(args.os_user),
-                     '/home/{}/preparation_pyspark.json'.format(args.os_user))
-        note_id = import_note('/home/{}/preparation_pyspark.json'.format(args.os_user), notebook_ip)
-        run_note(note_id, notebook_ip)
-        remove_note(note_id, notebook_ip)
-        prepare_note(i, '/home/{}/test_templates/template_visualization_pyspark.json'.format(args.os_user),
-                     '/home/{}/visualization_pyspark.json'.format(args.os_user))
-        note_id = import_note('/home/{}/visualization_pyspark.json'.format(args.os_user), notebook_ip)
-        run_note(note_id, notebook_ip)
-        remove_note(note_id, notebook_ip)
-        restart_interpreter(notebook_ip, i)
-
-def run_sparkr():
-    if os.path.exists('/opt/livy/'):
-        interpreters = ['local_interpreter_python2.sparkr', args.cluster_name + "_py2.sparkr"]
-    else:
-        interpreters = ['local_interpreter_python2.r', args.cluster_name + "_py2.r"]
-    for i in interpreters:
-        prepare_note(i, '/home/{}/test_templates/template_preparation_sparkr.json'.format(args.os_user),
-                     '/home/{}/preparation_sparkr.json'.format(args.os_user))
-        note_id = import_note('/home/{}/preparation_sparkr.json'.format(args.os_user), notebook_ip)
-        run_note(note_id, notebook_ip)
-        remove_note(note_id, notebook_ip)
-        prepare_note(i, '/home/{}/test_templates/template_visualization_sparkr.json'.format(args.os_user),
-                     '/home/{}/visualization_sparkr.json'.format(args.os_user))
-        note_id = import_note('/home/{}/visualization_sparkr.json'.format(args.os_user), notebook_ip)
-        run_note(note_id, notebook_ip)
-        remove_note(note_id, notebook_ip)
-        restart_interpreter(notebook_ip, i)
-
-def run_spark():
-    interpreters = ['local_interpreter_python2.spark', args.cluster_name + "_py2.spark"]
-    for i in interpreters:
-        prepare_note(i, '/home/{}/test_templates/template_preparation_spark.json'.format(args.os_user),
-                     '/home/{}/preparation_spark.json'.format(args.os_user))
-        note_id = import_note('/home/{}/preparation_spark.json'.format(args.os_user), notebook_ip)
-        run_note(note_id, notebook_ip)
-        remove_note(note_id, notebook_ip)
-        restart_interpreter(notebook_ip, i)
-
-
-if __name__ == "__main__":
-    try:
-        notebook_ip = local('hostname -I', capture=True)
-        prepare_templates()
-        run_pyspark()
-        run_sparkr()
-        run_spark()
-    except Exception as err:
-        print('Error!', str(err))
-        sys.exit(1)
-
-    sys.exit(0)
\ No newline at end of file
diff --git a/integration-tests/examples/test_templates/deeplearning/conv.prototxt b/integration-tests/examples/test_templates/deeplearning/conv.prototxt
deleted file mode 100644
index 0343891..0000000
--- a/integration-tests/examples/test_templates/deeplearning/conv.prototxt
+++ /dev/null
@@ -1,48 +0,0 @@
-# *****************************************************************************
-#
-#  Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing,
-#  software distributed under the License is distributed on an
-#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#  KIND, either express or implied.  See the License for the
-#  specific language governing permissions and limitations
-#  under the License.
-#
-# ******************************************************************************
-
-# Simple single-layer network to showcase editing model parameters.
-name: "convolution"
-layer {
-  name: "data"
-  type: "Input"
-  top: "data"
-  input_param { shape: { dim: 1 dim: 1 dim: 100 dim: 100 } }
-}
-layer {
-  name: "conv"
-  type: "Convolution"
-  bottom: "data"
-  top: "conv"
-  convolution_param {
-    num_output: 3
-    kernel_size: 5
-    stride: 1
-    weight_filler {
-      type: "gaussian"
-      std: 0.01
-    }
-    bias_filler {
-      type: "constant"
-      value: 0
-    }
-  }
-}
diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml
deleted file mode 100644
index f6dd75d..0000000
--- a/integration-tests/pom.xml
+++ /dev/null
@@ -1,315 +0,0 @@
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~   http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing,
-  ~ software distributed under the License is distributed on an
-  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  ~ KIND, either express or implied.  See the License for the
-  ~ specific language governing permissions and limitations
-  ~ under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.epam.dlab</groupId>
-    <artifactId>integration-tests</artifactId>
-    <version>1.0</version>
-    <packaging>jar</packaging>
-
-	<scm>
-    	<connection>scm:git:file://.</connection>
-    	<url>scm:git:file://.</url>
-    	<tag>HEAD</tag>
-	</scm>
-
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-        <java.version>1.8</java.version>
-        <maven-compiler-plugin.version>3.5.1</maven-compiler-plugin.version>
-        
-        <properties-maven-plugin.version>1.0.0</properties-maven-plugin.version>
-        <buildnumber-maven-plugin.version>1.4</buildnumber-maven-plugin.version>
-        <maven-shade-plugin.version>2.4.3</maven-shade-plugin.version>
-        <maven.build.timestamp.format>yyyy-MM-dd HH:mm:ss</maven.build.timestamp.format>
-    </properties>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-            <version>6.10</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.jayway.restassured</groupId>
-            <artifactId>rest-assured</artifactId>
-            <version>2.9.0</version>
-        </dependency>
-        
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>2.9.7</version>
-        </dependency>
-        
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-            <version>2.9.7</version>
-        </dependency>
-        
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-            <version>2.9.7</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-ec2</artifactId>
-            <version>1.9.13</version>
-        </dependency>
-        
-		<dependency>
-    		<groupId>com.amazonaws</groupId>
-    		<artifactId>aws-java-sdk-s3</artifactId>
-    		<version>1.9.13</version>
-		</dependency>
-
-		<dependency>
-			<groupId>org.apache.logging.log4j</groupId>
-			<artifactId>log4j-api</artifactId>
-			<version>2.8.2</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.logging.log4j</groupId>
-			<artifactId>log4j-core</artifactId>
-			<version>2.8.2</version>
-		</dependency>
-		<dependency>
-			<groupId>org.slf4j</groupId>
-			<artifactId>slf4j-simple</artifactId>
-			<version>1.7.25</version>
-		</dependency>
-
-        <dependency>
-            <groupId>com.github.docker-java</groupId>
-            <artifactId>docker-java</artifactId>
-            <version>3.0.6</version>
-        </dependency>
-        
-        <dependency>
-            <groupId>org.scijava</groupId>
-            <artifactId>native-lib-loader</artifactId>
-            <version>2.0.2</version>
-        </dependency>
-        
-        <dependency>
-            <groupId>com.jcraft</groupId>
-            <artifactId>jsch</artifactId>
-            <version>0.1.54</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
-            <version>2.13.0</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.microsoft.azure</groupId>
-            <artifactId>azure</artifactId>
-            <version>1.5.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.microsoft.azure</groupId>
-            <artifactId>azure-mgmt-compute</artifactId>
-            <version>1.5.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.microsoft.azure</groupId>
-            <artifactId>azure-mgmt-resources</artifactId>
-            <version>1.5.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.microsoft.azure</groupId>
-            <artifactId>azure-mgmt-network</artifactId>
-            <version>1.5.1</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.google.cloud</groupId>
-            <artifactId>google-cloud-compute</artifactId>
-            <version>0.34.0-alpha</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.squareup.okio</groupId>
-            <artifactId>okio</artifactId>
-            <version>1.13.0</version>
-        </dependency>
-        <dependency>
-            <groupId>com.nimbusds</groupId>
-            <artifactId>nimbus-jose-jwt</artifactId>
-            <version>7.9</version>
-        </dependency>
-        <dependency>
-            <groupId>net.minidev</groupId>
-            <artifactId>json-smart</artifactId>
-            <version>2.3</version>
-        </dependency>
-        <dependency>
-            <groupId>javax.mail</groupId>
-            <artifactId>javax.mail-api</artifactId>
-            <version>1.6.0</version>
-        </dependency>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <version>1.16.18</version>
-        </dependency>
-    </dependencies>
-
-    <build>
-    	<finalName>${project.artifactId}-${dlab.version}</finalName>
-        <plugins>
-            <plugin>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>${maven-compiler-plugin.version}</version>
-                <configuration>
-                    <source>${java.version}</source>
-                    <target>${java.version}</target>
-                    
-                </configuration>
-            </plugin>
-            
-        	<plugin>
-				<groupId>org.codehaus.mojo</groupId>
-				<artifactId>properties-maven-plugin</artifactId>
-				<version>${properties-maven-plugin.version}</version>
-				<executions>
-					<execution>
-						<phase>initialize</phase>
-						<goals>
-							<goal>read-project-properties</goal>
-						</goals>
-						<configuration>
-							<files>
-								<file>../build.properties</file>
-							</files>
-						</configuration>
-					</execution>
-				</executions>
-			</plugin>
-			
-			<plugin>
-				<groupId>org.codehaus.mojo</groupId>
-				<artifactId>buildnumber-maven-plugin</artifactId>
-				<version>${buildnumber-maven-plugin.version}</version>
-				<executions>
-					<execution>
-						<phase>validate</phase>
-						<goals>
-							<goal>create</goal>
-						</goals>
-					</execution>
-				</executions>
-				<configuration>
-					<doCheck>false</doCheck>
-					<doUpdate>false</doUpdate>
-					<getRevisionOnlyOnce>true</getRevisionOnlyOnce>
-					<revisionOnScmFailure>none</revisionOnScmFailure>
-				</configuration>
-      		</plugin>
-			
-            <plugin>
-                <artifactId>maven-shade-plugin</artifactId>
-                <version>${maven-shade-plugin.version}</version>
-                <executions>
-                    <execution>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>shade</goal>
-                        </goals>
-                        <configuration>
-                            <createDependencyReducedPom>false</createDependencyReducedPom>
-                            <transformers>
-                                <transformer
-                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
-                                <transformer
-                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
-<!--                                     <mainClass>com.epam.dlab.automation</mainClass> -->
-									<manifestEntries>
-										<Created-By>&lt;EPAM&gt; Systems</Created-By>
-										<Name>com/epam/dlab/automation</Name>
-										<Implementation-Title>DLab Integration Tests</Implementation-Title>
-										<Implementation-Version>${dlab.version}</Implementation-Version>
-										<Implementation-Vendor>&lt;EPAM&gt; Systems</Implementation-Vendor>
-										<Build-Time>${maven.build.timestamp}</Build-Time>
-										<Build-OS>${os.name}</Build-OS>
-										<GIT-Branch>${scmBranch}</GIT-Branch>
-										<GIT-Commit>${buildNumber}</GIT-Commit>
-									</manifestEntries>
-                                </transformer>
-                            </transformers>
-                            <filters>
-                            	<filter>
-                            		<artifact>*:*</artifact>
-                            		<excludes>
-										<exclude>META-INF/*.SF</exclude>
-				                		<exclude>META-INF/*.DSA</exclude>
-				                		<exclude>META-INF/*.RSA</exclude>
-			                		</excludes>
-                            	</filter>
-                            </filters>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.rat</groupId>
-                <artifactId>apache-rat-plugin</artifactId>
-                <version>0.7</version>
-                <configuration>
-                    <excludes>
-                        <exclude>.git/**</exclude>
-                        <exclude>.idea/**</exclude>
-                        <exclude>**/.gitignore</exclude>
-                        <exclude>**/*.ini</exclude>
-                        <exclude>**/*.json</exclude>
-                        <exclude>**/__init__.py</exclude>
-                        <exclude>**/*.conf</exclude>
-                        <exclude>**/.gitkeep</exclude>
-                        <exclude>**/*.lst</exclude>
-                        <exclude>**/*template</exclude>
-                    </excludes>
-                </configuration>
-                <executions>
-                    <execution>
-                        <phase>verify</phase>
-                        <goals>
-                            <goal>check</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <configuration>
-                    <useSystemClassLoader>false</useSystemClassLoader>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>
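For context on the build above: the properties-maven-plugin execution reads ../build.properties during the initialize phase, which is where the ${dlab.version} placeholder used in <finalName> and in the shaded manifest entries gets its value. A minimal sketch of such a file, with a hypothetical version value (the real file is not shown in this diff):

    # build.properties -- hypothetical contents, for illustration only
    dlab.version=2.3.0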
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/VirtualMachineStatusChecker.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/VirtualMachineStatusChecker.java
deleted file mode 100644
index 0ecff1d..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/VirtualMachineStatusChecker.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.cloud;
-
-import com.epam.dlab.automation.cloud.aws.AmazonHelper;
-import com.epam.dlab.automation.cloud.aws.AmazonInstanceState;
-import com.epam.dlab.automation.cloud.azure.AzureHelper;
-import com.epam.dlab.automation.cloud.gcp.GcpHelper;
-import com.epam.dlab.automation.cloud.gcp.GcpInstanceState;
-import com.epam.dlab.automation.helper.CloudProvider;
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.microsoft.azure.management.compute.PowerState;
-import org.testng.Assert;
-
-import java.io.IOException;
-
-public class VirtualMachineStatusChecker {
-
-	private static final String UNKNOWN_CLOUD_PROVIDER = "Unknown cloud provider";
-
-	private VirtualMachineStatusChecker(){}
-
-    public static void checkIfRunning(String tagNameValue, boolean restrictionMode)
-			throws InterruptedException, IOException {
-
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                AmazonHelper.checkAmazonStatus(tagNameValue, AmazonInstanceState.RUNNING);
-                break;
-            case CloudProvider.AZURE_PROVIDER:
-                AzureHelper.checkAzureStatus(tagNameValue, PowerState.RUNNING, restrictionMode);
-                break;
-            case CloudProvider.GCP_PROVIDER:
-                GcpHelper.checkGcpStatus(tagNameValue, ConfigPropertyValue.getGcpDlabProjectId(),
-                        GcpInstanceState.RUNNING, restrictionMode,
-                        GcpHelper.getAvailableZonesForProject(ConfigPropertyValue.getGcpDlabProjectId()));
-                break;
-            default:
-                Assert.fail(UNKNOWN_CLOUD_PROVIDER);
-        }
-
-    }
-
-    public static void checkIfTerminated(String tagNameValue, boolean restrictionMode)
-			throws InterruptedException, IOException {
-
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                AmazonHelper.checkAmazonStatus(tagNameValue, AmazonInstanceState.TERMINATED);
-                break;
-            case CloudProvider.AZURE_PROVIDER:
-                AzureHelper.checkAzureStatus(tagNameValue, PowerState.STOPPED, restrictionMode);
-                break;
-            case CloudProvider.GCP_PROVIDER:
-                GcpHelper.checkGcpStatus(tagNameValue, ConfigPropertyValue.getGcpDlabProjectId(),
-                        GcpInstanceState.TERMINATED, restrictionMode,
-                        GcpHelper.getAvailableZonesForProject(ConfigPropertyValue.getGcpDlabProjectId()));
-                break;
-            default:
-                Assert.fail(UNKNOWN_CLOUD_PROVIDER);
-        }
-
-    }
-
-    public static void checkIfStopped(String tagNameValue, boolean restrictionMode)
-			throws InterruptedException, IOException {
-
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                AmazonHelper.checkAmazonStatus(tagNameValue, AmazonInstanceState.STOPPED);
-                break;
-            case CloudProvider.AZURE_PROVIDER:
-                AzureHelper.checkAzureStatus(tagNameValue, PowerState.DEALLOCATED, restrictionMode);
-                break;
-            case CloudProvider.GCP_PROVIDER:
-                GcpHelper.checkGcpStatus(tagNameValue, ConfigPropertyValue.getGcpDlabProjectId(),
-                        GcpInstanceState.TERMINATED, restrictionMode,
-                        GcpHelper.getAvailableZonesForProject(ConfigPropertyValue.getGcpDlabProjectId()));
-                break;
-            default:
-                Assert.fail(UNKNOWN_CLOUD_PROVIDER);
-        }
-
-    }
-
-    public static String getStartingStatus() {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                return AmazonInstanceState.STARTING.toString();
-            case CloudProvider.AZURE_PROVIDER:
-                return PowerState.STARTING.toString();
-            case CloudProvider.GCP_PROVIDER:
-				return GcpInstanceState.STARTING.toString();
-            default:
-                return "";
-        }
-
-    }
-
-    public static String getRunningStatus(){
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                return AmazonInstanceState.RUNNING.toString();
-            case CloudProvider.AZURE_PROVIDER:
-                return PowerState.RUNNING.toString();
-            case CloudProvider.GCP_PROVIDER:
-                return GcpInstanceState.RUNNING.toString();
-            default:
-                return "";
-        }
-
-    }
-
-}
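The checker above dispatches on ConfigPropertyValue.getCloudProvider(), so test code can assert instance states without knowing the target cloud. A minimal usage sketch, assuming the class still existed; "nb-instance" is a made-up tag value, not from the source:

    // Wait until the instance tagged "nb-instance" is reported as running.
    // The second argument disables strict (exact-name) matching.
    VirtualMachineStatusChecker.checkIfRunning("nb-instance", false);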
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
deleted file mode 100644
index 3b0ef82..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.cloud.aws;
-
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.regions.Region;
-import com.amazonaws.regions.Regions;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.AmazonEC2Client;
-import com.amazonaws.services.ec2.model.*;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3Client;
-import com.amazonaws.services.s3.model.AccessControlList;
-import com.amazonaws.services.s3.model.Grant;
-import com.epam.dlab.automation.exceptions.CloudException;
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.epam.dlab.automation.helper.NamingHelper;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.List;
-
-public class AmazonHelper {
-
-    private static final Logger LOGGER = LogManager.getLogger(AmazonHelper.class);
-	private static final Duration CHECK_TIMEOUT = Duration.parse("PT10m");
-	private static final String LOCALHOST_IP = ConfigPropertyValue.get("LOCALHOST_IP");
-
-	private AmazonHelper(){}
-	
-	private static AWSCredentials getCredentials() {
-		return new BasicAWSCredentials(ConfigPropertyValue.getAwsAccessKeyId(), ConfigPropertyValue.getAwsSecretAccessKey());
-	}
-	
-	private static Region getRegion() {
-		return Region.getRegion(Regions.fromName(ConfigPropertyValue.getAwsRegion()));
-	}
-
-	private static List<Instance> getInstances(String instanceName) {
-		AWSCredentials credentials = getCredentials();
-		AmazonEC2 ec2 = new AmazonEC2Client(credentials);
-		ec2.setRegion(getRegion());
-
-		List<String> tagValues = new ArrayList<>();
-		tagValues.add(instanceName + "*");
-		Filter filter = new Filter("tag:" + NamingHelper.getServiceBaseName() + "-Tag", tagValues);
-
-		DescribeInstancesRequest describeInstanceRequest = new DescribeInstancesRequest().withFilters(filter);
-		DescribeInstancesResult describeInstanceResult = ec2.describeInstances(describeInstanceRequest);
-
-		List<Reservation> reservations = describeInstanceResult.getReservations();
-
-		if (reservations.isEmpty()) {
-			throw new CloudException("Instance " + instanceName + " in Amazon not found");
-		}
-
-		List<Instance> instances = reservations.get(0).getInstances();
-		if (instances.isEmpty()) {
-			throw new CloudException("Instance " + instanceName + " in Amazon not found");
-		}
-
-		return instances;
-	}
-
-	public static Instance getInstance(String instanceName) {
-    	return (ConfigPropertyValue.isRunModeLocal() ?
-    			new Instance()
-            		.withPrivateDnsName("localhost")
-            		.withPrivateIpAddress(LOCALHOST_IP)
-            		.withPublicDnsName("localhost")
-            		.withPublicIpAddress(LOCALHOST_IP)
-            		.withTags(new Tag()
-            					.withKey("Name")
-            					.withValue(instanceName)) :
-            	getInstances(instanceName).get(0));
-    }
-
-	public static void checkAmazonStatus(String instanceName, AmazonInstanceState expAmazonState) throws
-			InterruptedException {
-        LOGGER.info("Check status of instance {} on Amazon: {}", instanceName);
-        if (ConfigPropertyValue.isRunModeLocal()) {
-        	LOGGER.info("Amazon instance {} fake state is {}", instanceName, expAmazonState);
-        	return;
-        }
-        
-        String instanceState;
-        long requestTimeout = ConfigPropertyValue.getAwsRequestTimeout().toMillis();
-    	long timeout = CHECK_TIMEOUT.toMillis();
-        long expiredTime = System.currentTimeMillis() + timeout;
-        Instance instance;
-        while (true) {
-        	instance = AmazonHelper.getInstance(instanceName);
-        	instanceState = instance.getState().getName();
-        	if (!instance.getState().getName().equals("shutting-down")) {
-        		break;
-        	}
-        	if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-                LOGGER.info("Amazon instance {} state is {}", instanceName, instanceState);
-        		throw new CloudException("Timeout has been expired for check amazon instance " + instanceState);
-            }
-            Thread.sleep(requestTimeout);
-        }
-        
-        for (Instance i : AmazonHelper.getInstances(instanceName)) {
-            LOGGER.info("Amazon instance {} state is {}. Instance id {}, private IP {}, public IP {}",
-            		instanceName, instanceState, i.getInstanceId(), i.getPrivateIpAddress(), i.getPublicIpAddress());
-		}
-        Assert.assertEquals(instanceState, expAmazonState.toString(), "Amazon instance " + instanceName + " state is not correct. Instance id " +
-        		instance.getInstanceId() + ", private IP " + instance.getPrivateIpAddress() + ", public IP " + instance.getPublicIpAddress());
-    }
-
-    public static void printBucketGrants(String bucketName){
-        LOGGER.info("Print grants for bucket {} on Amazon: " , bucketName);
-        if (ConfigPropertyValue.isRunModeLocal()) {
-        	LOGGER.info("  action skipped for run in local mode");
-        	return;
-        }
-        AWSCredentials credentials = getCredentials();
-        AmazonS3 s3 = new AmazonS3Client(credentials);
-        
-        s3.setRegion(getRegion());
-        AccessControlList acl = s3.getBucketAcl(bucketName);
-        for (Grant grant : acl.getGrants()) {
-            LOGGER.info(grant);
-		}
-    }
-}
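checkAmazonStatus (like its Azure and GCP counterparts below) implements the same poll-until-deadline loop inline. A generic sketch of that pattern using only the JDK; this helper is an illustration and does not exist in the module:

    import java.time.Duration;
    import java.util.function.Predicate;
    import java.util.function.Supplier;

    final class PollUtil {
        private PollUtil() {}

        /** Polls probe every interval until done accepts a value or the deadline passes. */
        static <T> T pollUntil(Supplier<T> probe, Predicate<T> done,
                               Duration timeout, Duration interval) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeout.toMillis();
            while (true) {
                T value = probe.get();
                if (done.test(value) || System.currentTimeMillis() >= deadline) {
                    return value; // caller decides whether a non-matching value means failure
                }
                Thread.sleep(interval.toMillis());
            }
        }
    }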
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonInstanceState.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonInstanceState.java
deleted file mode 100644
index 576be9d..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonInstanceState.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.cloud.aws;
-
-public enum AmazonInstanceState {
-    STARTING,
-    RUNNING,
-    TERMINATED,
-    STOPPED;
-	
-    @Override
-    public String toString() {
-    	return super.toString().toLowerCase();
-    }
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/azure/AzureHelper.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/azure/AzureHelper.java
deleted file mode 100644
index 25fb388..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/azure/AzureHelper.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.cloud.azure;
-
-import com.epam.dlab.automation.exceptions.CloudException;
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.microsoft.azure.management.Azure;
-import com.microsoft.azure.management.compute.PowerState;
-import com.microsoft.azure.management.compute.VirtualMachine;
-import com.microsoft.azure.management.network.NetworkInterface;
-import com.microsoft.azure.management.network.PublicIPAddress;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.File;
-import java.io.IOException;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-
-public class AzureHelper{
-
-    private static final Logger LOGGER = LogManager.getLogger(AzureHelper.class);
-    private static final Duration CHECK_TIMEOUT = Duration.parse("PT10m");
-    private static final String LOCALHOST_IP = ConfigPropertyValue.get("LOCALHOST_IP");
-
-	private static Azure azure = getAzureInstance();
-
-    private AzureHelper(){}
-
-	private static Azure getAzureInstance() {
-		if (!ConfigPropertyValue.isRunModeLocal() && Objects.isNull(azure)) {
-			try {
-				return Azure.configure().authenticate(
-						new File(ConfigPropertyValue.getAzureAuthFileName())).withDefaultSubscription();
-			} catch (IOException e) {
-				LOGGER.info("An exception occured: {}", e);
-			}
-		}
-		return azure;
-	}
-
-    private static List<VirtualMachine> getVirtualMachines(){
-        return !azure.virtualMachines().list().isEmpty() ? new ArrayList<>(azure.virtualMachines().list()) : null;
-    }
-
-    public static List<VirtualMachine> getVirtualMachinesByName(String name, boolean restrictionMode){
-        if(ConfigPropertyValue.isRunModeLocal()){
-
-            List<VirtualMachine> vmLocalModeList = new ArrayList<>();
-            VirtualMachine mockedVM = mock(VirtualMachine.class);
-            PublicIPAddress mockedIPAddress = mock(PublicIPAddress.class);
-            NetworkInterface mockedNetworkInterface = mock(NetworkInterface.class);
-            when(mockedVM.getPrimaryPublicIPAddress()).thenReturn(mockedIPAddress);
-            when(mockedIPAddress.ipAddress()).thenReturn(LOCALHOST_IP);
-            when(mockedVM.getPrimaryNetworkInterface()).thenReturn(mockedNetworkInterface);
-            when(mockedNetworkInterface.primaryPrivateIP()).thenReturn(LOCALHOST_IP);
-            vmLocalModeList.add(mockedVM);
-
-            return vmLocalModeList;
-
-        }
-        List<VirtualMachine> vmList = getVirtualMachines();
-        if(vmList == null){
-            LOGGER.warn("There is not any virtual machine in Azure");
-            return vmList;
-        }
-        if(restrictionMode){
-            vmList.removeIf(vm -> !hasName(vm, name));
-        }
-        else{
-            vmList.removeIf(vm -> !containsName(vm, name));
-        }
-        return !vmList.isEmpty() ? vmList : null;
-    }
-
-    private static boolean hasName(VirtualMachine vm, String name){
-        return vm.name().equals(name);
-    }
-
-    private static boolean containsName(VirtualMachine vm, String name){
-        return vm.name().contains(name);
-    }
-
-    private static PowerState getStatus(VirtualMachine vm){
-        return vm.powerState();
-    }
-
-	public static void checkAzureStatus(String virtualMachineName, PowerState expAzureState, boolean restrictionMode)
-			throws InterruptedException {
-        LOGGER.info("Check status of virtual machine with name {} on Azure", virtualMachineName);
-        if (ConfigPropertyValue.isRunModeLocal()) {
-            LOGGER.info("Azure virtual machine with name {} fake state is {}", virtualMachineName, expAzureState);
-            return;
-        }
-        List<VirtualMachine> vmsWithName = getVirtualMachinesByName(virtualMachineName, restrictionMode);
-        if(vmsWithName == null){
-            LOGGER.warn("There is not any virtual machine in Azure with name {}", virtualMachineName);
-            return;
-        }
-
-        PowerState virtualMachineState;
-        long requestTimeout = ConfigPropertyValue.getAzureRequestTimeout().toMillis();
-        long timeout = CHECK_TIMEOUT.toMillis();
-        long expiredTime = System.currentTimeMillis() + timeout;
-        VirtualMachine virtualMachine = vmsWithName.get(0);
-        while (true) {
-            virtualMachineState = getStatus(virtualMachine.refresh()); // refresh so the cached power state can change
-            if (virtualMachineState == expAzureState) {
-                break;
-            }
-            if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-                LOGGER.info("Azure virtual machine with name {} state is {}", virtualMachineName, getStatus(virtualMachine));
-                throw new CloudException("Timeout has been expired for check state of azure virtual machine with name " + virtualMachineName);
-            }
-            Thread.sleep(requestTimeout);
-        }
-
-        for (VirtualMachine  vm : vmsWithName) {
-            LOGGER.info("Azure virtual machine with name {} state is {}. Virtual machine id {}, private IP {}, public IP {}",
-                    virtualMachineName, getStatus(vm), vm.vmId(), vm.getPrimaryNetworkInterface().primaryPrivateIP(),
-                    vm.getPrimaryPublicIPAddress() != null ? vm.getPrimaryPublicIPAddress().ipAddress() : "doesn't exist for this resource type");
-        }
-        Assert.assertEquals(virtualMachineState, expAzureState, "Azure virtual machine with name " + virtualMachineName +
-                " state is not correct. Virtual machine id " +
-                virtualMachine.vmId() + ", private IP " + virtualMachine.getPrimaryNetworkInterface().primaryPrivateIP() +
-                ", public IP " +
-                (virtualMachine.getPrimaryPublicIPAddress() != null ? virtualMachine.getPrimaryPublicIPAddress().ipAddress() : "doesn't exist for this resource type" ));
-    }
-
-}
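Note the restrictionMode flag above: true matches the virtual machine name exactly (hasName), false matches by substring (containsName). A usage sketch with made-up names:

    // Exact-name lookup vs. substring lookup (both names are hypothetical).
    List<VirtualMachine> exact = AzureHelper.getVirtualMachinesByName("dlab-nb-vm-1", true);
    List<VirtualMachine> loose = AzureHelper.getVirtualMachinesByName("dlab-nb", false);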
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/gcp/GcpHelper.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/gcp/GcpHelper.java
deleted file mode 100644
index 7240464..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/gcp/GcpHelper.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.cloud.gcp;
-
-import com.epam.dlab.automation.exceptions.CloudException;
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
-import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport;
-import com.google.api.client.http.HttpTransport;
-import com.google.api.client.json.JsonFactory;
-import com.google.api.client.json.jackson2.JacksonFactory;
-import com.google.api.services.compute.Compute;
-import com.google.api.services.compute.model.*;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import java.time.Duration;
-import java.util.*;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class GcpHelper {
-
-	private static final Logger LOGGER = LogManager.getLogger(GcpHelper.class);
-	private static final Duration CHECK_TIMEOUT = Duration.parse("PT10m");
-	private static final String LOCALHOST_IP = ConfigPropertyValue.get("LOCALHOST_IP");
-	private static final String NOT_EXIST = "doesn't exist for this resource type";
-
-	private GcpHelper() {
-	}
-
-	private static List<Instance> getInstances(String projectId, List<String> zones) throws IOException {
-		List<Instance> instanceList = new ArrayList<>();
-		for (String zone : zones) {
-			Compute.Instances.List request = ComputeService.getInstance().instances().list(projectId, zone);
-			InstanceList response;
-			do {
-				response = request.execute();
-				if (response.getItems() == null) {
-					continue;
-				}
-				instanceList.addAll(response.getItems());
-				request.setPageToken(response.getNextPageToken());
-			} while (response.getNextPageToken() != null);
-
-		}
-		return !instanceList.isEmpty() ? instanceList : null;
-	}
-
-	public static List<String> getInstancePrivateIps(Instance instance) {
-		return instance.getNetworkInterfaces().stream().filter(Objects::nonNull)
-				.map(NetworkInterface::getNetworkIP).filter(Objects::nonNull)
-				.collect(Collectors.toList());
-	}
-
-	public static List<String> getInstancePublicIps(Instance instance) {
-		return instance.getNetworkInterfaces()
-				.stream().filter(Objects::nonNull)
-				.map(NetworkInterface::getAccessConfigs)
-				.filter(Objects::nonNull).map(Collection::stream)
-				.flatMap(Function.identity()).filter(Objects::nonNull)
-				.map(AccessConfig::getNatIP).filter(Objects::nonNull)
-				.collect(Collectors.toList());
-	}
-
-
-	public static List<Instance> getInstancesByName(String name, String projectId, boolean restrictionMode,
-													List<String> zones) throws IOException {
-		if (ConfigPropertyValue.isRunModeLocal()) {
-			List<Instance> mockedInstanceList = new ArrayList<>();
-			Instance mockedInstance = mock(Instance.class);
-			NetworkInterface mockedNetworkInterface = mock(NetworkInterface.class);
-			when(mockedInstance.getNetworkInterfaces()).thenReturn(Collections.singletonList(mockedNetworkInterface));
-			when(mockedInstance.getNetworkInterfaces().get(0).getNetworkIP()).thenReturn(LOCALHOST_IP);
-			AccessConfig mockedAccessConfig = mock(AccessConfig.class);
-			when(mockedInstance.getNetworkInterfaces().get(0).getAccessConfigs())
-					.thenReturn(Collections.singletonList(mockedAccessConfig));
-			when(mockedInstance.getNetworkInterfaces().get(0).getAccessConfigs().get(0).getNatIP())
-					.thenReturn(LOCALHOST_IP);
-			mockedInstanceList.add(mockedInstance);
-			return mockedInstanceList;
-		}
-		List<Instance> instanceList = getInstances(projectId, zones);
-		if (instanceList == null) {
-			LOGGER.warn("There is not any virtual machine in GCP for project with id {}", projectId);
-			return instanceList;
-		}
-		if (restrictionMode) {
-			instanceList.removeIf(instance -> !hasName(instance, name));
-		} else {
-			instanceList.removeIf(instance -> !containsName(instance, name));
-		}
-		return !instanceList.isEmpty() ? instanceList : null;
-	}
-
-	private static boolean hasName(Instance instance, String name) {
-		return instance.getName().equals(name);
-	}
-
-	private static boolean containsName(Instance instance, String name) {
-		return instance.getName().contains(name);
-	}
-
-	private static String getStatus(Instance instance) {
-		return instance.getStatus().toLowerCase();
-	}
-
-	public static void checkGcpStatus(String instanceName, String projectId, GcpInstanceState expGcpStatus, boolean
-			restrictionMode, List<String> zones) throws InterruptedException, IOException {
-
-		LOGGER.info("Check status of instance with name {} on GCP", instanceName);
-		if (ConfigPropertyValue.isRunModeLocal()) {
-			LOGGER.info("GCP instance with name {} fake status is {}", instanceName, expGcpStatus);
-			return;
-		}
-		List<Instance> instancesWithName = getInstancesByName(instanceName, projectId, restrictionMode, zones);
-		if (instancesWithName == null) {
-			LOGGER.warn("There is not any instance in GCP with name {}", instanceName);
-			return;
-		}
-
-		String instanceStatus;
-		long requestTimeout = ConfigPropertyValue.getGcpRequestTimeout().toMillis();
-		long timeout = CHECK_TIMEOUT.toMillis();
-		long expiredTime = System.currentTimeMillis() + timeout;
-		Instance instance = instancesWithName.get(0);
-		while (true) {
-			instanceStatus = getStatus(instance = getInstancesByName(instanceName, projectId, restrictionMode, zones).get(0)); // re-fetch each poll so the status can change
-			if (instanceStatus.equalsIgnoreCase(expGcpStatus.toString())) {
-				break;
-			}
-			if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-				LOGGER.info("GCP instance with name {} state is {}", instanceName, getStatus(instance));
-				throw new CloudException("Timeout has been expired for check status of GCP instance with " +
-						"name " + instanceName);
-			}
-			Thread.sleep(requestTimeout);
-		}
-
-		for (Instance inst : instancesWithName) {
-			LOGGER.info("GCP instance with name {} status is {}. Instance id {}, private IP {}, public " +
-							"IP {}",
-					instanceName, getStatus(inst), inst.getId(), (!getInstancePrivateIps(inst).isEmpty() ?
-							getInstancePrivateIps(inst).get(0) : NOT_EXIST),
-					(!getInstancePublicIps(inst).isEmpty() ? getInstancePublicIps(inst).get(0) : NOT_EXIST));
-		}
-		Assert.assertEquals(instanceStatus, expGcpStatus.toString(), "GCP instance with name " + instanceName +
-				" status is not correct. Instance id " + instance.getId() + ", private IP " +
-				(!getInstancePrivateIps(instance).isEmpty() ? getInstancePrivateIps(instance).get(0) : NOT_EXIST) +
-				", public IP " +
-				(!getInstancePublicIps(instance).isEmpty() ? getInstancePublicIps(instance).get(0) : NOT_EXIST));
-	}
-
-	public static List<String> getAvailableZonesForProject(String projectId) throws IOException {
-		if (ConfigPropertyValue.isRunModeLocal()) {
-			return Collections.emptyList();
-		}
-		List<Zone> zoneList = new ArrayList<>();
-		Compute.Zones.List request = ComputeService.getInstance().zones().list(projectId);
-		ZoneList response;
-		do {
-			response = request.execute();
-			if (response.getItems() == null) {
-				continue;
-			}
-			zoneList.addAll(response.getItems());
-			request.setPageToken(response.getNextPageToken());
-		} while (response.getNextPageToken() != null);
-		return zoneList.stream().map(Zone::getDescription).collect(Collectors.toList());
-	}
-
-	private static class ComputeService {
-
-		private static Compute instance;
-
-		private ComputeService() {
-		}
-
-		static synchronized Compute getInstance() throws IOException {
-			if (!ConfigPropertyValue.isRunModeLocal() && instance == null) {
-				try {
-					instance = createComputeService();
-				} catch (GeneralSecurityException e) {
-					LOGGER.info("An exception occured: {}", e);
-				}
-			}
-			return instance;
-		}
-
-		private static Compute createComputeService() throws IOException, GeneralSecurityException {
-			HttpTransport httpTransport = GoogleNetHttpTransport.newTrustedTransport();
-			JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
-
-			GoogleCredential credential =
-					GoogleCredential.fromStream(new FileInputStream(ConfigPropertyValue.getGcpAuthFileName()));
-			if (credential.createScopedRequired()) {
-				credential = credential.createScoped(
-						Collections.singletonList("https://www.googleapis.com/auth/cloud-platform"));
-			}
-
-			return new Compute.Builder(httpTransport, jsonFactory, credential)
-					.setApplicationName("Google-ComputeSample/0.1")
-					.build();
-		}
-
-	}
-
-}
-
-
-
-
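A usage sketch tying together the two public entry points of GcpHelper above; the project id and tag value are made-up examples:

    // Resolve the project's zones once, then poll until the instance is running.
    List<String> zones = GcpHelper.getAvailableZonesForProject("my-gcp-project");
    GcpHelper.checkGcpStatus("nb-instance", "my-gcp-project",
            GcpInstanceState.RUNNING, false, zones);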
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/gcp/GcpInstanceState.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/gcp/GcpInstanceState.java
deleted file mode 100644
index 5c084c1..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/gcp/GcpInstanceState.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.cloud.gcp;
-
-public enum GcpInstanceState {
-	STARTING,
-	RUNNING,
-	TERMINATED,
-	STOPPED;
-
-	@Override
-	public String toString() {
-		return super.toString().toLowerCase();
-	}
-}
-
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/AckStatus.java b/integration-tests/src/main/java/com/epam/dlab/automation/docker/AckStatus.java
deleted file mode 100644
index df4b7e7..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/AckStatus.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.docker;
-
-public class AckStatus {
-    private int status;
-    
-    private String message;
-
-    public AckStatus() {
-        status = 0;
-        message = "";
-    }
-
-	AckStatus(int status, String message) {
-        this.status = status;
-        this.message = message;
-    }
-
-    public int getStatus() {
-        return status;
-    }
-
-    public String getMessage() {
-        return message;
-    }
-
-    public boolean isOk() {
-        return status == 0;
-    }
-
-    @Override
-    public String toString() {
-        return isOk() ? "OK" : message;
-    }
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Bridge.java b/integration-tests/src/main/java/com/epam/dlab/automation/docker/Bridge.java
deleted file mode 100644
index 8cc6015..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Bridge.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.docker;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-public class Bridge {
-
-	@JsonProperty("IPAMConfig")
-	private Object ipamConfig;
-
-	@JsonProperty("Links")
-	private Object links;
-
-	@JsonProperty("Aliases")
-	private Object aliases;
-
-	@JsonProperty("NetworkID")
-	private String networkId;
-
-	@JsonProperty("EndpointID")
-	private String endpointId;
-
-	@JsonProperty("Gateway")
-	private String gateway;
-
-	@JsonProperty("IPAddress")
-	private String ipAddress;
-
-	@JsonProperty("IPPrefixLen")
-	private int ipPrefixLen;
-
-	@JsonProperty("IPv6Gateway")
-	private String ipv6Gateway;
-
-	@JsonProperty("GlobalIPv6Address")
-	private String globalIpv6Address;
-
-	@JsonProperty("GlobalIPv6PrefixLen")
-	private int globalIpv6PrefixLen;
-
-	@JsonProperty("MacAddress")
-	private String macAddress;
-
-
-	public Object getIpamConfig() {
-		return ipamConfig;
-	}
-
-	public void setIpamConfig(Object ipamConfig) {
-		this.ipamConfig = ipamConfig;
-	}
-
-	public Object getLinks() {
-		return links;
-	}
-
-	public void setLinks(Object links) {
-		this.links = links;
-	}
-
-	public Object getAliases() {
-		return aliases;
-	}
-
-	public void setAliases(Object aliases) {
-		this.aliases = aliases;
-	}
-
-	public String getNetworkId() {
-		return networkId;
-	}
-
-	public void setNetworkId(String networkId) {
-		this.networkId = networkId;
-	}
-
-	public String getEndpointId() {
-		return endpointId;
-	}
-
-	public void setEndpointId(String endpointId) {
-		this.endpointId = endpointId;
-	}
-
-	public String getGateway() {
-		return gateway;
-	}
-
-	public void setGateway(String gateway) {
-		this.gateway = gateway;
-	}
-
-	public String getIpAddress() {
-		return ipAddress;
-	}
-
-	public void setIpAddress(String ipAddress) {
-		this.ipAddress = ipAddress;
-	}
-
-	public int getIpPrefixLen() {
-		return ipPrefixLen;
-	}
-
-	public void setIpPrefixLen(int ipPrefixLen) {
-		this.ipPrefixLen = ipPrefixLen;
-	}
-
-	public String getIpv6Gateway() {
-		return ipv6Gateway;
-	}
-
-	public void setIpv6Gateway(String ipv6Gateway) {
-		this.ipv6Gateway = ipv6Gateway;
-	}
-
-	public String getGlobalIpv6Address() {
-		return globalIpv6Address;
-	}
-
-	public void setGlobalIpv6Address(String globalIpv6Address) {
-		this.globalIpv6Address = globalIpv6Address;
-	}
-
-	public int getGlobalIpv6PrefixLen() {
-		return globalIpv6PrefixLen;
-	}
-
-	public void setGlobalIpv6PrefixLen(int globalIpv6PrefixLen) {
-		this.globalIpv6PrefixLen = globalIpv6PrefixLen;
-	}
-
-	public String getMacAddress() {
-		return macAddress;
-	}
-
-	public void setMacAddress(String macAddress) {
-		this.macAddress = macAddress;
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Docker.java b/integration-tests/src/main/java/com/epam/dlab/automation/docker/Docker.java
deleted file mode 100644
index 012edaf..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Docker.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.docker;
-
-import com.epam.dlab.automation.exceptions.DockerException;
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.jcraft.jsch.ChannelExec;
-import com.jcraft.jsch.JSchException;
-import com.jcraft.jsch.Session;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.List;
-
-public class Docker {
-    private static final Logger LOGGER = LogManager.getLogger(Docker.class);
-    
-    private static final String GET_CONTAINERS = "echo -e \"GET /containers/json?all=1 HTTP/1.0\\r\\n\" | nc -U /var/run/docker.sock";
-    
-    private static final String DOCKER_STATUS_EXIT0 = "Exited (0)";
-
-    private Docker(){}
-
-    public static void checkDockerStatus(String containerName, String ip)
-			throws IOException, InterruptedException, JSchException {
-        
-        LOGGER.info("Check docker status for instance {} and container {}", ip, containerName);
-        if (ConfigPropertyValue.isRunModeLocal()) {
-        	LOGGER.info("  check skipped for run in local mode");
-        	return;
-        }
-
-        Session session = SSHConnect.getConnect(ConfigPropertyValue.getClusterOsUser(), ip, 22);
-        ChannelExec getResult = SSHConnect.setCommand(session, GET_CONTAINERS);
-        InputStream in = getResult.getInputStream();
-        List<DockerContainer> dockerContainerList = getDockerContainerList(in);
-        AckStatus status = SSHConnect.checkAck(getResult);
-        Assert.assertTrue(status.isOk());
-        
-        DockerContainer dockerContainer = getDockerContainer(dockerContainerList, containerName);
-        LOGGER.debug("Docker container for {} has id {} and status {}", containerName, dockerContainer.getId(), dockerContainer.getStatus());
-        Assert.assertTrue(dockerContainer.getStatus().contains(DOCKER_STATUS_EXIT0), "Status of container is not Exited (0)");
-        LOGGER.info("Docker container {} has status {}", containerName, DOCKER_STATUS_EXIT0);
-    }
-
-    private static List<DockerContainer> getDockerContainerList(InputStream in) throws IOException {
-        
-        BufferedReader reader = new BufferedReader(new InputStreamReader(in));         
-        String line;
-        List<DockerContainer> dockerContainerList = new ArrayList<>(); // empty, not null, when no "Id" line is found
-
-        TypeReference<List<DockerContainer>> typeRef = new TypeReference<List<DockerContainer>>() { };
-        ObjectMapper mapper = new ObjectMapper();
-
-		List<String> result = new ArrayList<>();
-        while ((line = reader.readLine()) != null) {      
-             result.add(line);
-             if (line.contains("Id")) {
-            	 LOGGER.trace("Add docker container: {}", line);
-                 dockerContainerList = mapper.readValue(line, typeRef);
-             }       
-        }
-        
-        return dockerContainerList;
-    }
-
-	private static DockerContainer getDockerContainer(List<DockerContainer> dockerContainerList, String
-			containerName) {
-		for (DockerContainer dockerContainer : dockerContainerList) {
-			String name = dockerContainer.getNames().get(0);
-			if (name.contains(containerName)) {
-				return dockerContainer;
-			}
-		}
-        
-        final String msg = "Docker container for " + containerName + " not found";
-        LOGGER.error(msg);
-		StringBuilder containers = new StringBuilder("Container list:");
-		for (DockerContainer dockerContainer : dockerContainerList) {
-			containers.append(System.lineSeparator()).append(dockerContainer.getNames().get(0));
-		}
-		LOGGER.debug(containers.toString());
-
-		throw new DockerException("Docker container for " + containerName + " not found");
-    }
-}
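getDockerContainerList scans the raw HTTP response from the Docker socket and deserializes the JSON body line with Jackson. A self-contained sketch of that deserialization step; the payload below is a made-up example in the shape returned by GET /containers/json?all=1, and the sketch assumes the DockerContainer class above is on the classpath:

    import com.epam.dlab.automation.docker.DockerContainer;
    import com.fasterxml.jackson.core.type.TypeReference;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.List;

    public class ContainerListParseExample {
        public static void main(String[] args) throws Exception {
            // Hypothetical payload; a real daemon returns many more fields.
            String json = "[{\"Id\":\"abc123\",\"Names\":[\"/dlab_container\"],"
                    + "\"Status\":\"Exited (0) 5 minutes ago\"}]";
            List<DockerContainer> containers = new ObjectMapper()
                    .readValue(json, new TypeReference<List<DockerContainer>>() {});
            System.out.println(containers.get(0).getStatus()); // Exited (0) 5 minutes ago
        }
    }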
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/DockerContainer.java b/integration-tests/src/main/java/com/epam/dlab/automation/docker/DockerContainer.java
deleted file mode 100644
index c22a688..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/DockerContainer.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.docker;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-import java.util.List;
-
-public class DockerContainer {
-
-	@JsonProperty("Id")
-	private String id;
-
-	@JsonProperty("Names")
-	private List<String> names;
-
-	@JsonProperty("Image")
-	private String image;
-
-	@JsonProperty("ImageID")
-	private String imageID;
-
-	@JsonProperty("Command")
-	private String command;
-
-	@JsonProperty("Created")
-	private int created;
-
-	@JsonProperty("Ports")
-	private List<Object> ports;
-
-	@JsonProperty("Labels")
-	private Labels labels;
-
-	@JsonProperty("State")
-	private String state;
-
-	@JsonProperty("Status")
-	private String status;
-
-	@JsonProperty("HostConfig")
-	private HostConfig hostConfig;
-
-	@JsonProperty("NetworkSettings")
-	private NetworkSettings networkSettings;
-
-	@JsonProperty("Mounts")
-	private List<Object> mounts;
-
-
-    public String getId() {
-		return id;
-	}
-
-	public void setId(String id) {
-		this.id = id;
-	}
-
-	public List<String> getNames() {
-		return names;
-	}
-
-	public void setNames(List<String> names) {
-		this.names = names;
-	}
-
-	public String getImage() {
-		return image;
-	}
-
-	public void setImage(String image) {
-		this.image = image;
-	}
-
-	public String getImageID() {
-		return imageID;
-	}
-
-	public void setImageID(String imageID) {
-		this.imageID = imageID;
-	}
-
-	public String getCommand() {
-		return command;
-	}
-
-	public void setCommand(String command) {
-		this.command = command;
-	}
-
-	public int getCreated() {
-		return created;
-	}
-
-	public void setCreated(int created) {
-		this.created = created;
-	}
-
-	public List<Object> getPorts() {
-		return ports;
-	}
-
-	public void setPorts(List<Object> ports) {
-		this.ports = ports;
-	}
-
-	public Labels getLabels() {
-		return labels;
-	}
-
-	public void setLabels(Labels labels) {
-		this.labels = labels;
-	}
-
-	public String getState() {
-		return state;
-	}
-
-	public void setState(String state) {
-		this.state = state;
-	}
-
-	public String getStatus() {
-		return status;
-	}
-
-	public void setStatus(String status) {
-		this.status = status;
-	}
-
-	public HostConfig getHostConfig() {
-		return hostConfig;
-	}
-
-	public void setHostConfig(HostConfig hostConfig) {
-		this.hostConfig = hostConfig;
-	}
-
-	public NetworkSettings getNetworkSettings() {
-		return networkSettings;
-	}
-
-	public void setNetworkSettings(NetworkSettings networkSettings) {
-		this.networkSettings = networkSettings;
-	}
-
-	public List<Object> getMounts() {
-		return mounts;
-	}
-
-	public void setMounts(List<Object> mounts) {
-		this.mounts = mounts;
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/HostConfig.java b/integration-tests/src/main/java/com/epam/dlab/automation/docker/HostConfig.java
deleted file mode 100644
index c2d5db0..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/HostConfig.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.docker;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class HostConfig {
-
-	@JsonProperty("NetworkMode")
-	private String networkMode;
-
-    public String getNetworkMode() {
-		return networkMode;
-    }
-
-    public void setNetworkMode(String networkMode) {
-		this.networkMode = networkMode;
-    }
-
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Networks.java b/integration-tests/src/main/java/com/epam/dlab/automation/docker/Networks.java
deleted file mode 100644
index 2679fa3..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Networks.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.docker;
-
-public class Networks {
-
-	private Bridge bridge;
-
-    public Bridge getBridge() {
-		return bridge;
-    }
-
-    public void setBridge(Bridge bridge) {
-		this.bridge = bridge;
-    }
-
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/SSHConnect.java b/integration-tests/src/main/java/com/epam/dlab/automation/docker/SSHConnect.java
deleted file mode 100644
index fa369fa..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/SSHConnect.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.docker;
-
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.jcraft.jsch.*;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import java.util.Properties;
-
-import static java.lang.System.err;
-import static java.lang.System.out;
-
-public class SSHConnect {
-	private static final Logger LOGGER = LogManager.getLogger(SSHConnect.class);
-	private static final String LOCALHOST_IP = ConfigPropertyValue.get("LOCALHOST_IP");
-	private static final String STRICT_HOST_KEY_CHECKING = "StrictHostKeyChecking";
-
-	private SSHConnect() {
-	}
-
-    public static Session getConnect(String username, String host, int port) throws JSchException {
-        Session session;
-        JSch jsch = new JSch();
-
-        Properties config = new Properties();
-		config.put(STRICT_HOST_KEY_CHECKING, "no");
-        
-        jsch.addIdentity(ConfigPropertyValue.getAccessKeyPrivFileName());
-        session = jsch.getSession(username, host, port);
-        session.setConfig(config);
-
-        LOGGER.info("Connecting as {} to {}:{}", username, host, port);
-        session.connect();
-
-        LOGGER.info("Connected to {}:{}", host, port);
-        return session;
-    }
-
-    public static Session getSession(String username, String host, int port) throws JSchException {
-        Session session;
-        JSch jsch = new JSch();
-
-        Properties config = new Properties();
-		config.put(STRICT_HOST_KEY_CHECKING, "no");
-
-        jsch.addIdentity(ConfigPropertyValue.getAccessKeyPrivFileName());
-        session = jsch.getSession(username, host, port);
-        session.setConfig(config);
-        session.connect();
-
-
-        LOGGER.info("Connected to {}:{}", host, port);
-        return session;
-    }
-
-    public static ChannelSftp getChannelSftp(Session session) throws JSchException {
-        Channel channel = session.openChannel("sftp");
-        channel.connect();
-		return (ChannelSftp) channel;
-    }
-
-    public static Session getForwardedConnect(String username, String hostAlias, int port) throws JSchException {
-        Session session;
-        JSch jsch = new JSch();
-        Properties config = new Properties();
-		config.put(STRICT_HOST_KEY_CHECKING, "no");
-
-        jsch.addIdentity(ConfigPropertyValue.getAccessKeyPrivFileName());
-        session = jsch.getSession(username, LOCALHOST_IP, port);
-        session.setConfig(config);
-        session.setHostKeyAlias(hostAlias);
-        session.connect();
-        LOGGER.info("Connected to {} through {}:{}", hostAlias, LOCALHOST_IP, port);
-        return session;
-    }
-
-	public static ChannelExec setCommand(Session session, String command) throws JSchException {
-        LOGGER.info("Setting command: {}", command);
-
-        ChannelExec channelExec = (ChannelExec)session.openChannel("exec");
-        channelExec.setCommand(command);
-        channelExec.connect();
-
-        return channelExec;
-    }
-
-	public static AckStatus checkAck(ChannelExec channel) throws InterruptedException {
-		channel.setOutputStream(out, true);
-		channel.setErrStream(err, true);
-
-        int status;
-        while(channel.getExitStatus() == -1) {
-            Thread.sleep(1000);
-        }
-        status = channel.getExitStatus();
-
-        return new AckStatus(status, "");
-    }
-
-	public static AckStatus checkAck(ChannelSftp channel) throws InterruptedException {
-		channel.setOutputStream(out, true);
-
-        int status;
-        while(channel.getExitStatus() == -1) {
-            Thread.sleep(1000);
-        }
-        status = channel.getExitStatus();
-
-        return new AckStatus(status, "");
-    }
-
-}
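
A self-contained sketch of the exec pattern the deleted SSHConnect composed from setCommand() and checkAck(): open an exec channel, run a command, and poll the exit status. Host, user, key path, and the command are placeholders.

    import com.jcraft.jsch.ChannelExec;
    import com.jcraft.jsch.JSch;
    import com.jcraft.jsch.Session;
    import java.io.ByteArrayOutputStream;
    import java.util.Properties;

    public class RemoteExecSketch {
        public static void main(String[] args) throws Exception {
            JSch jsch = new JSch();
            jsch.addIdentity("/path/to/private_key.pem");               // placeholder key path
            Session session = jsch.getSession("ubuntu", "198.51.100.10", 22); // placeholder host
            Properties config = new Properties();
            config.put("StrictHostKeyChecking", "no");                  // test-only; verify host keys in production
            session.setConfig(config);
            session.connect();

            ChannelExec exec = (ChannelExec) session.openChannel("exec");
            exec.setCommand("docker ps -a");
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            exec.setOutputStream(out, true);
            exec.connect();
            // Poll until the remote command finishes, as checkAck() above did.
            while (exec.getExitStatus() == -1) {
                Thread.sleep(1000);
            }
            System.out.println("exit=" + exec.getExitStatus());
            System.out.println(out);
            exec.disconnect();
            session.disconnect();
        }
    }
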
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/CloudException.java b/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/CloudException.java
deleted file mode 100644
index b576931..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/CloudException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.exceptions;
-
-public class CloudException extends RuntimeException {
-
-	private static final long serialVersionUID = 1L;
-
-    public CloudException(String message) {
-        super(message);
-    }
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/DockerException.java b/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/DockerException.java
deleted file mode 100644
index ae8d7df..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/DockerException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.exceptions;
-
-public class DockerException extends RuntimeException {
-
-	private static final long serialVersionUID = 1L;
-
-	public DockerException(String message) {
-		super(message);
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/LoadFailException.java b/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/LoadFailException.java
deleted file mode 100644
index 16d4f20..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/LoadFailException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.exceptions;
-
-public class LoadFailException extends RuntimeException {
-
-	private static final long serialVersionUID = 1L;
-
-	public LoadFailException(String message, Exception cause) {
-		super(message, cause);
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/helper/CloudHelper.java b/integration-tests/src/main/java/com/epam/dlab/automation/helper/CloudHelper.java
deleted file mode 100644
index 4e0894f..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/helper/CloudHelper.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.helper;
-
-import com.amazonaws.services.ec2.model.Instance;
-import com.amazonaws.services.ec2.model.Tag;
-import com.epam.dlab.automation.cloud.aws.AmazonHelper;
-import com.epam.dlab.automation.cloud.azure.AzureHelper;
-import com.epam.dlab.automation.cloud.gcp.GcpHelper;
-import com.epam.dlab.automation.exceptions.CloudException;
-import com.epam.dlab.automation.model.DeployClusterDto;
-import com.epam.dlab.automation.model.DeployDataProcDto;
-import com.epam.dlab.automation.model.DeployEMRDto;
-import com.epam.dlab.automation.model.NotebookConfig;
-import org.apache.commons.lang3.StringUtils;
-
-import java.io.IOException;
-import java.util.List;
-
-public class CloudHelper {
-
-    private CloudHelper(){}
-
-	public static String getInstancePublicIP(String name, boolean restrictionMode) throws IOException {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                return AmazonHelper.getInstance(name).getPublicIpAddress();
-            case CloudProvider.AZURE_PROVIDER:
-                if(AzureHelper.getVirtualMachinesByName(name, restrictionMode) != null){
-                    return AzureHelper.getVirtualMachinesByName(name, restrictionMode).get(0)
-                            .getPrimaryPublicIPAddress().ipAddress();
-                } else return null;
-            case CloudProvider.GCP_PROVIDER:
-                List<com.google.api.services.compute.model.Instance> instanceList =
-                        GcpHelper.getInstancesByName(name, ConfigPropertyValue.getGcpDlabProjectId(),
-                                restrictionMode,
-                                GcpHelper.getAvailableZonesForProject(ConfigPropertyValue.getGcpDlabProjectId()));
-                if (instanceList != null && !GcpHelper.getInstancePublicIps(instanceList.get(0)).isEmpty()) {
-                    return GcpHelper.getInstancePublicIps(instanceList.get(0)).get(0);
-                } else return null;
-            default:
-                return null;
-        }
-    }
-
-	public static String getInstancePrivateIP(String name, boolean restrictionMode) throws IOException {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                return AmazonHelper.getInstance(name).getPrivateIpAddress();
-            case CloudProvider.AZURE_PROVIDER:
-                if(AzureHelper.getVirtualMachinesByName(name, restrictionMode) != null){
-                    return AzureHelper.getVirtualMachinesByName(name, restrictionMode).get(0)
-                            .getPrimaryNetworkInterface().primaryPrivateIP();
-                } else return null;
-            case CloudProvider.GCP_PROVIDER:
-                List<com.google.api.services.compute.model.Instance> instanceList =
-                        GcpHelper.getInstancesByName(name, ConfigPropertyValue.getGcpDlabProjectId(), restrictionMode,
-                                GcpHelper.getAvailableZonesForProject(ConfigPropertyValue.getGcpDlabProjectId()));
-                if (instanceList != null && !GcpHelper.getInstancePrivateIps(instanceList.get(0)).isEmpty()) {
-                    return GcpHelper.getInstancePrivateIps(instanceList.get(0)).get(0);
-                } else return null;
-            default:
-                return null;
-        }
-    }
-
-	static String getInstanceNameByCondition(String name, boolean restrictionMode) throws IOException {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                Instance instance = AmazonHelper.getInstance(name);
-                for (Tag tag : instance.getTags()) {
-                    if (tag.getKey().equals("Name")) {
-                        return tag.getValue();
-                    }
-                }
-                throw new CloudException("Could not detect name for instance " + name);
-            case CloudProvider.AZURE_PROVIDER:
-                if(AzureHelper.getVirtualMachinesByName(name, restrictionMode) != null){
-                    return AzureHelper.getVirtualMachinesByName(name, restrictionMode).get(0).name();
-                } else return null;
-            case CloudProvider.GCP_PROVIDER:
-                if (GcpHelper.getInstancesByName(name, ConfigPropertyValue.getGcpDlabProjectId(), restrictionMode,
-                        GcpHelper.getAvailableZonesForProject(ConfigPropertyValue.getGcpDlabProjectId())) != null) {
-                    return GcpHelper.getInstancesByName(name, ConfigPropertyValue.getGcpDlabProjectId(),
-                            restrictionMode,
-                            GcpHelper.getAvailableZonesForProject(ConfigPropertyValue.getGcpDlabProjectId()))
-                            .get(0).getName();
-                }
-                else return null;
-            default:
-                return null;
-        }
-    }
-
-    public static String getClusterConfFileLocation(){
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                return PropertiesResolver.getClusterEC2ConfFileLocation();
-            case CloudProvider.AZURE_PROVIDER:
-                return PropertiesResolver.getClusterAzureConfFileLocation();
-            case CloudProvider.GCP_PROVIDER:
-                return PropertiesResolver.getClusterGcpConfFileLocation();
-            default:
-                return null;
-        }
-    }
-
-
-    public static String getPythonTestingScript(){
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                return "/usr/bin/python %s --storage %s --cloud aws --cluster_name %s --os_user %s";
-            case CloudProvider.AZURE_PROVIDER:
-                if(ConfigPropertyValue.getAzureDatalakeEnabled().equalsIgnoreCase("true")){
-                    return "/usr/bin/python %s --storage %s --cloud azure --cluster_name %s --os_user %s --azure_datalake_account "
-                            + ConfigPropertyValue.getAzureDatalakeSharedAccount();
-                }
-                else return "/usr/bin/python %s --storage %s --cloud azure --cluster_name %s --os_user %s --azure_storage_account "
-                        + ConfigPropertyValue.getAzureStorageSharedAccount();
-            case CloudProvider.GCP_PROVIDER:
-                return "/usr/bin/python %s --storage %s --cloud gcp --cluster_name %s --os_user %s";
-            default:
-                return null;
-        }
-    }
-
-    public static String getPythonTestingScript2(){
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-				return "/usr/bin/python /home/%s/%s --storage %s --notebook %s --cloud aws";
-            case CloudProvider.AZURE_PROVIDER:
-                if(ConfigPropertyValue.getAzureDatalakeEnabled().equalsIgnoreCase("true")){
-					return "/usr/bin/python /home/%s/%s --storage %s --notebook %s --cloud azure " +
-							"--azure_datalake_account " + ConfigPropertyValue.getAzureDatalakeSharedAccount();
-                } else return "/usr/bin/python /home/%s/%s --storage %s --notebook %s --cloud azure " +
-						"--azure_storage_account " + ConfigPropertyValue.getAzureStorageSharedAccount();
-            case CloudProvider.GCP_PROVIDER:
-				return "/usr/bin/python /home/%s/%s --storage %s --notebook %s --cloud gcp";
-            default:
-                return null;
-        }
-    }
-
-	static String getStorageNameAppendix() {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-            case CloudProvider.GCP_PROVIDER:
-                return "bucket";
-            case CloudProvider.AZURE_PROVIDER:
-                if (ConfigPropertyValue.getAzureDatalakeEnabled().equalsIgnoreCase("true")) {
-                    return "folder";
-                } else {
-                    return "container";
-                }
-            default:
-                return null;
-        }
-    }
-
-	public static String getDockerTemplateFileForDES(boolean isSpotRequired) {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-				return isSpotRequired ? "EMR_spot.json" : "EMR.json";
-            case CloudProvider.GCP_PROVIDER:
-                return "dataproc.json";
-            default:
-                return null;
-        }
-    }
-
-    public static Class<? extends DeployClusterDto> getDeployClusterClass() {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-                return DeployEMRDto.class;
-            case CloudProvider.GCP_PROVIDER:
-                return DeployDataProcDto.class;
-            default:
-                return null;
-        }
-    }
-
-	public static DeployClusterDto populateDeployClusterDto(DeployClusterDto deployClusterDto,
-															NotebookConfig nbConfig) {
-		if (nbConfig.getDataEngineType().equals(NamingHelper.DATA_ENGINE_SERVICE) &&
-				ConfigPropertyValue.getCloudProvider().equals(CloudProvider.AWS_PROVIDER)) {
-			DeployEMRDto emrDto = (DeployEMRDto) deployClusterDto;
-			if (!StringUtils.isEmpty(nbConfig.getDesVersion())) {
-				emrDto.setEmrVersion(nbConfig.getDesVersion());
-			}
-			if (nbConfig.isDesSpotRequired() && nbConfig.getDesSpotPrice() > 0) {
-				emrDto.setEmrSlaveInstanceSpot(nbConfig.isDesSpotRequired());
-				emrDto.setEmrSlaveInstanceSpotPctPrice(nbConfig.getDesSpotPrice());
-			}
-			return emrDto;
-		} else return deployClusterDto;
-	}
-
-	static String getGcpDataprocClusterName(String gcpDataprocMasterNodeName) {
-        return gcpDataprocMasterNodeName != null ?
-                gcpDataprocMasterNodeName.substring(0, gcpDataprocMasterNodeName.lastIndexOf('-')) : null;
-	}
-
-}
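
The %s placeholders in the templates returned by getPythonTestingScript() above were filled with String.format at the call site; a sketch with hypothetical argument values:

    public class ScriptTemplateSketch {
        public static void main(String[] args) {
            String template = "/usr/bin/python %s --storage %s --cloud aws --cluster_name %s --os_user %s";
            // Script path, bucket, cluster name, and OS user below are made-up values.
            String command = String.format(template,
                    "/home/ubuntu/automation_test.py", "dlab-jsmith-bucket", "des-cluster-1", "ubuntu");
            System.out.println(command);
        }
    }
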
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/helper/CloudProvider.java b/integration-tests/src/main/java/com/epam/dlab/automation/helper/CloudProvider.java
deleted file mode 100644
index f5241a4..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/helper/CloudProvider.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.helper;
-
-public class CloudProvider {
-
-	public static final String AWS_PROVIDER = "aws";
-	public static final String AZURE_PROVIDER = "azure";
-	public static final String GCP_PROVIDER = "gcp";
-
-	private CloudProvider() {
-	}
-
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/helper/ConfigPropertyValue.java b/integration-tests/src/main/java/com/epam/dlab/automation/helper/ConfigPropertyValue.java
deleted file mode 100644
index aeb6036..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/helper/ConfigPropertyValue.java
+++ /dev/null
@@ -1,387 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.helper;
-
-import com.epam.dlab.automation.exceptions.LoadFailException;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import java.io.File;
-import java.io.FileReader;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.util.Properties;
-
-public class ConfigPropertyValue {
-
-	private static final Logger LOGGER = LogManager.getLogger(ConfigPropertyValue.class);
-	private static final String CONFIG_FILE_NAME;
-
-    public static final String JENKINS_USERNAME="JENKINS_USERNAME";
-	public static final String JENKINS_PASS = "JENKINS_PASSWORD";
-	private static final String USERNAME="USERNAME";
-	private static final String PASS = "PASSWORD";
-	private static final String NOT_IAM_USERNAME="NOT_IAM_USERNAME";
-	private static final String NOT_IAM_PASS = "NOT_IAM_PASSWORD";
-	private static final String NOT_DLAB_USERNAME="NOT_DLAB_USERNAME";
-	private static final String NOT_DLAB_PASS = "NOT_DLAB_PASSWORD";
-	private static final String JENKINS_JOB_URL="JENKINS_JOB_URL";
-	private static final String USER_FOR_ACTIVATE_KEY="USER_FOR_ACTIVATE_KEY";
-	private static final String PASS_FOR_ACTIVATE_KEY = "PASSWORD_FOR_ACTIVATE_KEY";
-	private static final String ACCESS_KEY_PRIV_FILE_NAME="ACCESS_KEY_PRIV_FILE_NAME";
-	private static final String ACCESS_KEY_PUB_FILE_NAME="ACCESS_KEY_PUB_FILE_NAME";
-
-	private static final String CLOUD_PROVIDER="CLOUD_PROVIDER";
-    
-    private static final String AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID";
-    private static final String AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY";
-    private static final String AWS_REGION="AWS_REGION";
-    private static final String AWS_REQUEST_TIMEOUT="AWS_REQUEST_TIMEOUT";
-
-    private static final String AZURE_REGION="AZURE_REGION";
-    private static final String AZURE_REQUEST_TIMEOUT="AZURE_REQUEST_TIMEOUT";
-    private static final String AZURE_DATALAKE_ENABLED="AZURE_DATALAKE_ENABLED";
-    private static final String AZURE_DATALAKE_SHARED_ACCOUNT="AZURE_DATALAKE_SHARED_ACCOUNT";
-    private static final String AZURE_STORAGE_SHARED_ACCOUNT="AZURE_STORAGE_SHARED_ACCOUNT";
-	private static final String AZURE_AUTHENTICATION_FILE = "AZURE_AUTHENTICATION_FILE";
-
-	private static final String GCP_DLAB_PROJECT_ID = "GCP_DLAB_PROJECT_ID";
-    private static final String GCP_REGION="GCP_REGION";
-	private static final String GCP_REQUEST_TIMEOUT = "GCP_REQUEST_TIMEOUT";
-	private static final String GCP_AUTHENTICATION_FILE = "GCP_AUTHENTICATION_FILE";
-
-    private static final String TIMEOUT_JENKINS_AUTOTEST="TIMEOUT_JENKINS_AUTOTEST";
-    private static final String TIMEOUT_UPLOAD_KEY="TIMEOUT_UPLOAD_KEY";
-    private static final String TIMEOUT_SSN_STARTUP="TIMEOUT_SSN_STARTUP";
-
-    private static final String CLUSTER_OS_USERNAME = "CLUSTER_OS_USERNAME";
-    private static final String CLUSTER_OS_FAMILY = "CLUSTER_OS_FAMILY";
-    private static final String CONF_TAG_RESOURCE_ID = "CONF_TAG_RESOURCE_ID";
-
-	private static final String JUPYTER_SCENARIO_FILES = "JUPYTER_SCENARIO_FILES";
-	private static final String NOTEBOOKS_TO_TEST = "NOTEBOOKS_TO_TEST";
-	private static final String SKIPPED_LIBS = "SKIPPED_LIBS";
-	private static final String EXECUTION_THREADS = "execution.threads";
-
-    private static final String USE_JENKINS = "USE_JENKINS";
-    private static final String SSN_URL = "SSN_URL";
-    private static final String SERVICE_BASE_NAME = "SERVICE_BASE_NAME";
-    private static final String RUN_MODE_LOCAL = "RUN_MODE_LOCAL";
-    private static final String LOCALHOST_IP = "LOCALHOST_IP";
-
-    private static String jenkinsBuildNumber;
-
-
-    private static final Properties props = new Properties();
-
-    static {
-        CONFIG_FILE_NAME = PropertiesResolver.getConfFileLocation();
-        jenkinsBuildNumber = System.getProperty("jenkins.buildNumber", "");
-        if (jenkinsBuildNumber.isEmpty()) {
-            jenkinsBuildNumber = null;
-            LOGGER.info("Jenkins build number is not set");
-        }
-        
-    	loadProperties();
-    }
-    
-    private ConfigPropertyValue() { }
-	
-    private static Duration getDuration(String duration) {
-    	return Duration.parse("PT" + duration);
-    }
-    
-	public static String get(String propertyName) {
-		return get(propertyName, "");
-	}
-
-	public static String get(String propertyName, String defaultValue) {
-		return props.getProperty(propertyName, defaultValue);
-	}
-
-	private static int getInt(String value) {
-        return Integer.parseInt(value);
-    }
-	
-	public static int get(String propertyName, int defaultValue) {
-		if (props.values().isEmpty()) {
-			loadProperties();
-		}
-		String s = props.getProperty(propertyName, String.valueOf(defaultValue)); 
-		return Integer.parseInt(s);
-	}
-	
-	private static void printProperty(String propertyName) {
-        LOGGER.info("{} is {}", propertyName , props.getProperty(propertyName));
-	}
-	
-	private static void setKeyProperty(String propertyName) {
-		String s = props.getProperty(propertyName, "");
-		if (!s.isEmpty()) {
-            s = Paths.get(PropertiesResolver.getKeysLocation(), s).toAbsolutePath().toString();
-            props.setProperty(propertyName, s);
-        }
-	}
-	
-	private static void loadProperties() {
-        try (FileReader fin = new FileReader(new File(CONFIG_FILE_NAME))) {
-
-            props.load(fin);
-
-            PropertiesResolver.overlapProperty(props, CLUSTER_OS_USERNAME, true);
-            PropertiesResolver.overlapProperty(props, CLUSTER_OS_FAMILY, true);
-            PropertiesResolver.overlapProperty(props, AWS_REGION, true);
-            PropertiesResolver.overlapProperty(props, AZURE_REGION, true);
-			PropertiesResolver.overlapProperty(props, GCP_DLAB_PROJECT_ID, true);
-            PropertiesResolver.overlapProperty(props, GCP_REGION, true);
-            PropertiesResolver.overlapProperty(props, NOTEBOOKS_TO_TEST, false);
-			PropertiesResolver.overlapProperty(props, SKIPPED_LIBS, true);
-			PropertiesResolver.overlapProperty(props, USE_JENKINS, true);
-            PropertiesResolver.overlapProperty(props, JENKINS_JOB_URL, !isUseJenkins());
-            PropertiesResolver.overlapProperty(props, SSN_URL, isUseJenkins());
-            PropertiesResolver.overlapProperty(props, SERVICE_BASE_NAME, isUseJenkins());
-            PropertiesResolver.overlapProperty(props, RUN_MODE_LOCAL, true);
-            
-            setKeyProperty(ACCESS_KEY_PRIV_FILE_NAME);
-            setKeyProperty(ACCESS_KEY_PUB_FILE_NAME);
-        } catch (Exception e) {
-        	LOGGER.fatal("Failed to load properties from file {}.", CONFIG_FILE_NAME, e);
-			throw new LoadFailException("Failed to load properties from \"" + CONFIG_FILE_NAME + "\". " +
-					e.getLocalizedMessage(), e);
-        }
-        
-        printProperty(JENKINS_USERNAME);
-		printProperty(JENKINS_PASS);
-        printProperty(USERNAME);
-		printProperty(PASS);
-        printProperty(NOT_IAM_USERNAME);
-		printProperty(NOT_IAM_PASS);
-        printProperty(NOT_DLAB_USERNAME);
-		printProperty(NOT_DLAB_PASS);
-        printProperty(JENKINS_JOB_URL);
-        printProperty(USER_FOR_ACTIVATE_KEY);
-		printProperty(PASS_FOR_ACTIVATE_KEY);
-        printProperty(ACCESS_KEY_PRIV_FILE_NAME);
-        printProperty(ACCESS_KEY_PUB_FILE_NAME);
-        
-        printProperty(TIMEOUT_JENKINS_AUTOTEST);
-        printProperty(TIMEOUT_UPLOAD_KEY);
-        printProperty(TIMEOUT_SSN_STARTUP);
-
-        printProperty(JUPYTER_SCENARIO_FILES);
-        printProperty(CLOUD_PROVIDER);
-
-        printProperty(AZURE_DATALAKE_ENABLED);
-        printProperty(AZURE_DATALAKE_SHARED_ACCOUNT);
-        printProperty(AZURE_STORAGE_SHARED_ACCOUNT);
-        printProperty(NOTEBOOKS_TO_TEST);
-		printProperty(SKIPPED_LIBS);
-		printProperty(CLUSTER_OS_USERNAME);
-        printProperty(CLUSTER_OS_FAMILY);
-        printProperty(CONF_TAG_RESOURCE_ID);
-
-        printProperty(USE_JENKINS);
-        printProperty(RUN_MODE_LOCAL);
-        printProperty(LOCALHOST_IP);
-	}
-    
-    
-    public static String getJenkinsBuildNumber() {
-    	return jenkinsBuildNumber;
-    }
-
-    public static void setJenkinsBuildNumber(String jenkinsBuildNumber) {
-    	ConfigPropertyValue.jenkinsBuildNumber = jenkinsBuildNumber;
-    }
-
-    public static String getJenkinsUsername() {
-    	return get(JENKINS_USERNAME);
-    }
-    
-    public static String getJenkinsPassword() {
-		return get(JENKINS_PASS);
-    }
-
-    public static String getUsername() {
-    	return get(USERNAME);
-    }
-    
-    public static String getUsernameSimple() {
-    	String s = get(USERNAME);
-		int i = s.indexOf('@');
-		return (i == -1 ? s : s.substring(0, i).toLowerCase());
-	}
-
-    public static String getPassword() {
-		return get(PASS);
-    }
-
-    public static String getNotIAMUsername() {
-    	return get(NOT_IAM_USERNAME);
-    }
-
-    public static String getNotIAMPassword() {
-		return get(NOT_IAM_PASS);
-    }
-
-    public static String getNotDLabUsername() {
-    	return get(NOT_DLAB_USERNAME);
-    }
-
-    public static String getNotDLabPassword() {
-		return get(NOT_DLAB_PASS);
-    }
-
-    public static String getJenkinsJobURL() {
-    	return get(JENKINS_JOB_URL);
-    }
-
-    public static String getUserForActivateKey() {
-    	return get(USER_FOR_ACTIVATE_KEY);
-    }
-
-    public static String getPasswordForActivateKey() {
-		return get(PASS_FOR_ACTIVATE_KEY);
-    }
-
-
-    public static String getAccessKeyPrivFileName() {
-    	File file = new File(get(ACCESS_KEY_PRIV_FILE_NAME));
-        return file.getAbsolutePath();
-    }
-
-    public static String getAccessKeyPubFileName() {
-    	File file = new File(get(ACCESS_KEY_PUB_FILE_NAME));
-        return file.getAbsolutePath();
-    }
-
-    public static String getCloudProvider(){
-        return get(CLOUD_PROVIDER);
-    }
-
-    public static String getAzureAuthFileName(){
-        File file = new File(get(AZURE_AUTHENTICATION_FILE));
-        return file.getAbsolutePath();
-    }
-
-	public static String getGcpAuthFileName() {
-		File file = new File(get(GCP_AUTHENTICATION_FILE));
-		return file.getAbsolutePath();
-	}
-
-    public static String getAwsAccessKeyId() {
-        return get(AWS_ACCESS_KEY_ID);
-    }
-
-    public static String getAwsSecretAccessKey() {
-        return get(AWS_SECRET_ACCESS_KEY);
-    }
-
-	public static String getAwsRegion() {
-	    return get(AWS_REGION);
-	}
-
-	public static Duration getAwsRequestTimeout() {
-    	return getDuration(get(AWS_REQUEST_TIMEOUT, "10s"));
-    }
-
-    public static String getAzureRegion() {
-        return get(AZURE_REGION);
-    }
-
-    public static String getAzureDatalakeEnabled() {
-        return get(AZURE_DATALAKE_ENABLED);
-    }
-
-    public static String getAzureDatalakeSharedAccount() {
-        return get(AZURE_DATALAKE_SHARED_ACCOUNT);
-    }
-
-    public static String getAzureStorageSharedAccount() {
-        return get(AZURE_STORAGE_SHARED_ACCOUNT);
-    }
-
-	public static String getGcpDlabProjectId() {
-		return get(GCP_DLAB_PROJECT_ID);
-	}
-
-    public static String getGcpRegion() {
-        return get(GCP_REGION);
-    }
-
-	public static Duration getGcpRequestTimeout() {
-		return getDuration(get(GCP_REQUEST_TIMEOUT, "10s"));
-	}
-
-    public static Duration getAzureRequestTimeout() {
-        return getDuration(get(AZURE_REQUEST_TIMEOUT, "10s"));
-    }
-
-    public static Duration getTimeoutJenkinsAutotest() {
-    	return getDuration(get(TIMEOUT_JENKINS_AUTOTEST, "0s"));
-    }
-
-    public static int getExecutionThreads() {
-        return getInt(get(EXECUTION_THREADS, "-1"));
-    }
-
-    public static Duration getTimeoutUploadKey() {
-    	return getDuration(get(TIMEOUT_UPLOAD_KEY, "0s"));
-    }
-
-    public static Duration getTimeoutSSNStartup() {
-    	return getDuration(get(TIMEOUT_SSN_STARTUP, "0s"));
-    }
-
-
-    public static String getClusterOsUser() {
-    	return get(CLUSTER_OS_USERNAME);
-    }
-
-    public static String getClusterOsFamily() {
-    	return get(CLUSTER_OS_FAMILY);
-    }
-
-    public static String getNotebookTemplates() {
-    	return get(NOTEBOOKS_TO_TEST);
-    }
-
-	public static String getSkippedLibs() {
-		return get(SKIPPED_LIBS, "[]");
-	}
-
-	public static boolean isUseJenkins() {
-        String s = get(USE_JENKINS, "true");
-    	return Boolean.valueOf(s);
-    }
-    
-    public static String getSsnUrl() {
-        return get(SSN_URL);
-    }
-    
-    public static String getServiceBaseName() {
-        return get(SERVICE_BASE_NAME);
-    }
-    
-    public static boolean isRunModeLocal() {
-    	String s = get(RUN_MODE_LOCAL, "false");
-    	return Boolean.valueOf(s);
-    }
-}
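
Note on the timeout format: getDuration() above prefixes the configured value with "PT", so properties use java.time suffix notation ("10s", "45m", "2h30m"). A quick check:

    import java.time.Duration;

    public class TimeoutFormatSketch {
        public static void main(String[] args) {
            // Duration.parse is case-insensitive for the unit letters, so "10s" is valid.
            System.out.println(Duration.parse("PT" + "10s"));   // PT10S
            System.out.println(Duration.parse("PT" + "45m"));   // PT45M
            System.out.println(Duration.parse("PT" + "2h30m")); // PT2H30M
        }
    }
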
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/helper/NamingHelper.java b/integration-tests/src/main/java/com/epam/dlab/automation/helper/NamingHelper.java
deleted file mode 100644
index 3094f1c..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/helper/NamingHelper.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.helper;
-
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-public class NamingHelper {
-	public static final String CLUSTER_ABSENT = "cluster_absent";
-	public static final String DATA_ENGINE = "dataengine";
-	public static final String DATA_ENGINE_SERVICE = "dataengine-service";
-	public static final String DEEPLEARNING = "deeplearning";
-	public static final String JUPYTER = "jupyter";
-	public static final String TENSOR = "tensor";
-	public static final String RSTUDIO = "rstudio";
-	public static final String ZEPPELIN = "zeppelin";
-
-	private static final Map<String, String> SIMPLE_NOTEBOOK_NAMES = new HashMap<>();
-
-    private static AtomicInteger idCounter = new AtomicInteger(0);
-    
-    private static String serviceBaseName;
-    private static String ssnURL;
-    private static String ssnIp;
-    private static String ssnToken;
-
-	static {
-		SIMPLE_NOTEBOOK_NAMES.put(DEEPLEARNING, "dlr");
-		SIMPLE_NOTEBOOK_NAMES.put(JUPYTER, "jup");
-		SIMPLE_NOTEBOOK_NAMES.put(TENSOR, "tfl");
-		SIMPLE_NOTEBOOK_NAMES.put(RSTUDIO, "rst");
-		SIMPLE_NOTEBOOK_NAMES.put(ZEPPELIN, "zep");
-	}
-
-    private NamingHelper(){}
-
-	public static Map<String, String> getSimpleNotebookNames() {
-		return SIMPLE_NOTEBOOK_NAMES;
-	}
-
-	public static String getServiceBaseName() {
-    	return serviceBaseName;
-    }
-    
-    public static void setServiceBaseName(String serviceBaseName) {
-    	if (NamingHelper.serviceBaseName != null) {
-    		throw new IllegalStateException("Field serviceBaseName already has a value");
-    	}
-    	NamingHelper.serviceBaseName = serviceBaseName;
-    }
-    
-    public static String getSsnURL() {
-    	return ssnURL;
-    }
-    
-    public static void setSsnURL(String ssnURL) {
-    	if (NamingHelper.ssnURL != null) {
-    		throw new IllegalStateException("Field ssnURL already has a value");
-    	}
-    	NamingHelper.ssnURL = ssnURL;
-    }
-
-    public static String getSsnName() {
-    	return serviceBaseName + "-ssn";
-    }
-    
-    public static String getSsnIp() {
-    	return ssnIp;
-    }
-    
-    public static void setSsnIp(String ssnIp) {
-    	if (NamingHelper.ssnIp != null) {
-    		throw new IllegalStateException("Field ssnIp already has a value");
-    	}
-    	NamingHelper.ssnIp = ssnIp;
-    }
-
-    public static String getSsnToken() {
-    	return ssnToken;
-    }
-    
-    public static void setSsnToken(String ssnToken) {
-    	if (NamingHelper.ssnToken != null) {
-    		throw new IllegalStateException("Field ssnToken already has a value");
-    	}
-    	NamingHelper.ssnToken = ssnToken;
-    }
-    
-    public static String getEdgeName() {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-				return String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(), "edge");
-            case CloudProvider.AZURE_PROVIDER:
-			case CloudProvider.GCP_PROVIDER:
-				return String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(), "edge")
-                        .replace('_', '-');
-			default:
-                return null;
-        }
-    }
-    
-    public static String getNotebookInstanceName(String notebookName) {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-				return String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(), "nb", notebookName);
-            case CloudProvider.AZURE_PROVIDER:
-			case CloudProvider.GCP_PROVIDER:
-				return String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(), "nb", notebookName)
-                        .replace('_', '-');
-			default:
-                return null;
-        }
-    }
-    
-    public static String getClusterInstanceName(String notebookName, String clusterName, String dataEngineType) {
-		if (DATA_ENGINE.equals(dataEngineType)) {
-            switch (ConfigPropertyValue.getCloudProvider()) {
-                case CloudProvider.AWS_PROVIDER:
-					return String.join("-", getClusterInstanceNameForTestDES(notebookName, clusterName,
-							dataEngineType), "m");
-                case CloudProvider.AZURE_PROVIDER:
-				case CloudProvider.GCP_PROVIDER:
-					return String.join("-", getClusterInstanceNameForTestDES(notebookName, clusterName,
-							dataEngineType), "m").replace('_', '-');
-				default:
-                    return null;
-            }
-    	}
-    	else {
-    		return getClusterInstanceNameForTestDES(notebookName,clusterName,dataEngineType);
-    	}
-    }
-    
-    public static String getClusterInstanceNameForTestDES(String notebookName, String clusterName, String dataEngineType) {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-				return DATA_ENGINE.equals(dataEngineType) ?
-						String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(),
-								"de", notebookName, clusterName) :
-						String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(),
-								"des", notebookName, clusterName);
-
-            case CloudProvider.AZURE_PROVIDER:
-				return DATA_ENGINE.equals(dataEngineType) ?
-						String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(),
-								"de", notebookName, clusterName).replace('_', '-') :
-						String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(),
-								"des", notebookName, clusterName).replace('_', '-');
-
-			case CloudProvider.GCP_PROVIDER:
-				return DATA_ENGINE.equals(dataEngineType) ?
-						String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(),
-								"de", notebookName, clusterName).replace('_', '-') :
-						String.join("-", serviceBaseName, ConfigPropertyValue.getUsernameSimple(),
-								"des", notebookName, clusterName, "m").replace('_', '-');
-			default:
-                return null;
-        }
-
-    }
-
-	public static String getNotebookContainerName(String notebookName, String action) {
-    	return String.join("_", ConfigPropertyValue.getUsernameSimple(), action, "exploratory", notebookName);
-    }
-
-	public static String getClusterContainerName(String notebookName, String clusterName, String action) {
-		return String.join("_", ConfigPropertyValue.getUsernameSimple(), action, "computational",
-				notebookName, clusterName);
-    }
-    
-    public static String generateRandomValue() {
-		SimpleDateFormat df = new SimpleDateFormat("yyyyMMddHHmmss");
-        return String.join("_", "ITest", df.format(new Date()), String.valueOf(idCounter.incrementAndGet()));
-    }
-
-    public static String generateRandomValue(String notebookTemplateName) {
-		return String.join("_", SIMPLE_NOTEBOOK_NAMES.get(notebookTemplateName),
-				String.valueOf(idCounter.incrementAndGet()));
-    }
-    
-    public static String getSelfServiceURL(String path) {
-        return ssnURL + path;
-    }
-    
-    public static String getStorageName() {
-        switch (ConfigPropertyValue.getCloudProvider()) {
-            case CloudProvider.AWS_PROVIDER:
-			case CloudProvider.GCP_PROVIDER:
-                return String.format("%s-%s-%s", serviceBaseName, ConfigPropertyValue.getUsernameSimple(),
-                        CloudHelper.getStorageNameAppendix()).replace('_', '-').toLowerCase();
-            case CloudProvider.AZURE_PROVIDER:
-                return String.format("%s-%s-%s", serviceBaseName, "shared",
-                        CloudHelper.getStorageNameAppendix()).replace('_', '-').toLowerCase();
-			default:
-                return null;
-        }
-    }
-
-	public static String getClusterName(String clusterInstanceName, String dataEngineType, boolean restrictionMode)
-			throws IOException {
-		switch (ConfigPropertyValue.getCloudProvider()) {
-			case CloudProvider.AWS_PROVIDER:
-			case CloudProvider.AZURE_PROVIDER:
-				return DATA_ENGINE.equals(dataEngineType) ? clusterInstanceName :
-						CloudHelper.getInstanceNameByCondition(clusterInstanceName, restrictionMode);
-
-			case CloudProvider.GCP_PROVIDER:
-				return DATA_ENGINE.equals(dataEngineType) ? clusterInstanceName :
-						CloudHelper.getGcpDataprocClusterName(
-								CloudHelper.getInstanceNameByCondition(clusterInstanceName, restrictionMode));
-			default:
-				return null;
-		}
-    }
-
-	public static String getNotebookTestTemplatesPath(String notebookName) {
-		if (notebookName.contains(getSimpleNotebookNames().get(DEEPLEARNING))) {
-            return "test_templates/deeplearning/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(JUPYTER))) {
-            return "test_templates/jupyter/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(RSTUDIO))) {
-            return "test_templates/rstudio/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(TENSOR))) {
-            return "test_templates/tensor/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(ZEPPELIN))) {
-            return "test_templates/zeppelin/";
-        }
-        else return "";
-
-    }
-
-    public static String getNotebookType(String notebookName){
-		if (notebookName.contains(getSimpleNotebookNames().get(DEEPLEARNING))) {
-			return DEEPLEARNING + "/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(JUPYTER))) {
-			return JUPYTER + "/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(RSTUDIO))) {
-			return RSTUDIO + "/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(TENSOR))) {
-			return TENSOR + "/";
-		} else if (notebookName.contains(getSimpleNotebookNames().get(ZEPPELIN))) {
-			return ZEPPELIN + "/";
-        }
-        else return "";
-
-    }
-
-	public static boolean isClusterRequired(String notebookName) {
-		if (notebookName.contains(getSimpleNotebookNames().get(DEEPLEARNING))) {
-			return false;
-		} else if (notebookName.contains(getSimpleNotebookNames().get(JUPYTER))) {
-			return true;
-		} else if (notebookName.contains(getSimpleNotebookNames().get(RSTUDIO))) {
-			return true;
-		} else if (notebookName.contains(getSimpleNotebookNames().get(TENSOR))) {
-			return false;
-		} else if (notebookName.contains(getSimpleNotebookNames().get(ZEPPELIN))) {
-			return true;
-		}
-		return true;
-	}
-}
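
To make the naming scheme above concrete, a small sketch reproducing getNotebookInstanceName() for hypothetical inputs (service base name, user, and notebook name are made up):

    public class NamingSketch {
        public static void main(String[] args) {
            String serviceBaseName = "dlab-test";
            String userSimple = "jsmith";          // "jsmith@example.com" truncated at '@'
            String notebookName = "jup_12";

            // AWS keeps underscores as-is.
            String aws = String.join("-", serviceBaseName, userSimple, "nb", notebookName);
            // Azure and GCP additionally replace underscores with hyphens.
            String azureOrGcp = aws.replace('_', '-');

            System.out.println(aws);        // dlab-test-jsmith-nb-jup_12
            System.out.println(azureOrGcp); // dlab-test-jsmith-nb-jup-12
        }
    }
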
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/helper/PropertiesResolver.java b/integration-tests/src/main/java/com/epam/dlab/automation/helper/PropertiesResolver.java
deleted file mode 100644
index f71ccc3..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/helper/PropertiesResolver.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.helper;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Paths;
-import java.util.Properties;
-
-public class PropertiesResolver {
-
-    private static final Logger LOGGER = LogManager.getLogger(PropertiesResolver.class);
-    public static final boolean DEV_MODE;
-	private static final String CONFIG_FILE_NAME = "application.properties";
-	public static final String NOTEBOOK_SCENARIO_FILES_LOCATION_PROPERTY_TEMPLATE = "scenario.%s.files.location";
-	public static final String NOTEBOOK_TEST_TEMPLATES_LOCATION = "%s.test.templates.location";
-	public static final String NOTEBOOK_CONFIGURATION_FILE_TEMPLATE = "%s/%s-notebook.json";
-
-    //keys from application.properties(dev-application.properties)
-	private static final String CONF_FILE_LOCATION_PROPERTY = "conf.file.location";
-	private static final String KEYS_DIRECTORY_LOCATION_PROPERTY = "keys.directory.location";
-	private static final String NOTEBOOK_TEST_DATA_COPY_SCRIPT = "notebook.test.data.copy.script";
-	private static final String NOTEBOOK_TEST_LIB_LOCATION = "notebook.test.lib.location";
-
-	private static final String SCENARIO_JUPYTER_FILES_LOCATION_PROPERTY = "scenario.jupyter.files.location";
-	private static final String SCENARIO_RSTUDIO_FILES_LOCATION_PROPERTY = "scenario.rstudio.files.location";
-	private static final String SCENARIO_ZEPPELIN_FILES_LOCATION_PROPERTY = "scenario.zeppelin.files.location";
-	private static final String SCENARIO_TENSOR_FILES_LOCATION_PROPERTY = "scenario.tensor.files.location";
-	private static final String SCENARIO_DEEPLEARNING_FILES_LOCATION_PROPERTY = "scenario.deeplearning.files.location";
-
-	private static final String JUPYTER_TEST_TEMPLATES_LOCATION_PROPERTY = "jupyter.test.templates.location";
-	private static final String RSTUDIO_TEST_TEMPLATES_LOCATION_PROPERTY = "rstudio.test.templates.location";
-	private static final String ZEPPELIN_TEST_TEMPLATES_LOCATION_PROPERTY = "zeppelin.test.templates.location";
-	private static final String TENSOR_TEST_TEMPLATES_LOCATION_PROPERTY = "tensor.test.templates.location";
-	private static final String DEEPLEARNING_TEST_TEMPLATES_LOCATION_PROPERTY = "deeplearning.test.templates.location";
-
-	private static final String CLUSTER_CONFIG_FILE_LOCATION_PROPERTY = "ec2.config.files.location";
-	private static final String AZURE_CONFIG_FILE_LOCATION_PROPERTY = "azure.config.files.location";
-	private static final String GCP_CONFIG_FILE_LOCATION_PROPERTY = "gcp.config.files.location";
-
-	private PropertiesResolver() {
-	}
-
-    public static String getJupyterTestTemplatesLocationProperty() {
-        return JUPYTER_TEST_TEMPLATES_LOCATION_PROPERTY;
-    }
-
-    public static String getRstudioTestTemplatesLocationProperty() {
-        return RSTUDIO_TEST_TEMPLATES_LOCATION_PROPERTY;
-    }
-
-    public static String getZeppelinTestTemplatesLocationProperty() {
-        return ZEPPELIN_TEST_TEMPLATES_LOCATION_PROPERTY;
-    }
-
-    public static String getTensorTestTemplatesLocationProperty() {
-        return TENSOR_TEST_TEMPLATES_LOCATION_PROPERTY;
-    }
-
-    public static String getDeeplearningTestTemplatesLocationProperty() {
-        return DEEPLEARNING_TEST_TEMPLATES_LOCATION_PROPERTY;
-    }
-
-    private static Properties properties = new Properties();
-
-    static {
-        DEV_MODE = System.getProperty("run.mode", "remote").equalsIgnoreCase("dev");
-        loadApplicationProperties();
-    }
-
-	private static String getProperty(String propertyName, boolean isOptional) {
-		String s = System.getProperty(propertyName, "");
-		if (s.isEmpty() && !isOptional) {
-        	throw new IllegalArgumentException("Missing required JVM argument -D" + propertyName);
-        }
-        return s;
-	}
-	
-	public static void overlapProperty(Properties props, String propertyName, boolean isOptional) {
-		String argName = StringUtils.replaceChars(propertyName, '_', '.').toLowerCase();
-		String s = System.getProperty(argName, "");
-		if (!s.isEmpty()) {
-            props.setProperty(propertyName, s);
-        }
-		if (!isOptional && props.getProperty(propertyName, "").isEmpty()) {
-        	throw new IllegalArgumentException("Missing required argument -D" + argName + " or property " + propertyName);
-        }
-	}
-
-
-    private static String getConfRootPath() {
-    	return getProperty("conf.root.path", false);
-    }
-
-    private static void loadApplicationProperties() {
-        InputStream input = null;
-
-        try {
-            input = PropertiesResolver.class.getClassLoader().getResourceAsStream(CONFIG_FILE_NAME);
-
-            // load a properties file
-            properties.load(input);
-            String rootPath = getConfRootPath();
-            for (String key : properties.keySet().toArray(new String[0])) {
-            	String path = StringUtils.replace(properties.getProperty(key), "${CONF_ROOT_PATH}", rootPath);
-            	path = Paths.get(path).toAbsolutePath().toString();
-            	properties.setProperty(key, path);
-            }
-            overlapProperty(properties, CONF_FILE_LOCATION_PROPERTY, false);
-
-            // get the property value and print it out
-            LOGGER.info(properties.getProperty(CONF_FILE_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(KEYS_DIRECTORY_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(NOTEBOOK_TEST_DATA_COPY_SCRIPT));
-            LOGGER.info(properties.getProperty(NOTEBOOK_TEST_LIB_LOCATION));
-            LOGGER.info(properties.getProperty(SCENARIO_JUPYTER_FILES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(SCENARIO_RSTUDIO_FILES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(SCENARIO_ZEPPELIN_FILES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(SCENARIO_TENSOR_FILES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(SCENARIO_DEEPLEARNING_FILES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(JUPYTER_TEST_TEMPLATES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(RSTUDIO_TEST_TEMPLATES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(ZEPPELIN_TEST_TEMPLATES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(TENSOR_TEST_TEMPLATES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(DEEPLEARNING_TEST_TEMPLATES_LOCATION_PROPERTY));
-            LOGGER.info(properties.getProperty(CLUSTER_CONFIG_FILE_LOCATION_PROPERTY));
-
-        } catch (IOException ex) {
-            LOGGER.error(ex);
-            LOGGER.error("Application configuration file could not be loaded from path: {}", CONFIG_FILE_NAME);
-            System.exit(1);
-        } finally {
-            if (input != null) {
-                try {
-                    input.close();
-                } catch (IOException e) {
-                    LOGGER.error(e);
-                    LOGGER.error("Failed to close application configuration file: {}", CONFIG_FILE_NAME);
-                }
-            }
-        }
-    }
-
-
-    public static String getConfFileLocation() {
-        return properties.getProperty(CONF_FILE_LOCATION_PROPERTY);
-    }
-
-    public static String getKeysLocation() {
-        return properties.getProperty(KEYS_DIRECTORY_LOCATION_PROPERTY);
-    }
-
-    public static String getNotebookTestDataCopyScriptLocation() {
-        return properties.getProperty(NOTEBOOK_TEST_DATA_COPY_SCRIPT);
-    }
-
-    public static String getNotebookTestLibLocation() {
-        return properties.getProperty(NOTEBOOK_TEST_LIB_LOCATION);
-    }
-
-    public static String getScenarioJupyterFilesLocation() {
-        return properties.getProperty(SCENARIO_JUPYTER_FILES_LOCATION_PROPERTY);
-    }
-
-    public static String getScenarioRstudioFilesLocation() {
-        return properties.getProperty(SCENARIO_RSTUDIO_FILES_LOCATION_PROPERTY);
-    }
-
-    public static String getScenarioZeppelinFilesLocation() {
-        return properties.getProperty(SCENARIO_ZEPPELIN_FILES_LOCATION_PROPERTY);
-    }
-
-    public static String getScenarioTensorFilesLocation() {
-        return properties.getProperty(SCENARIO_TENSOR_FILES_LOCATION_PROPERTY);
-    }
-
-    public static String getScenarioDeeplearningFilesLocation() {
-        return properties.getProperty(SCENARIO_DEEPLEARNING_FILES_LOCATION_PROPERTY);
-    }
-
-    public static String getClusterEC2ConfFileLocation() {
-        return properties.getProperty(CLUSTER_CONFIG_FILE_LOCATION_PROPERTY);
-    }
-
-    public static String getClusterAzureConfFileLocation() {
-        return properties.getProperty(AZURE_CONFIG_FILE_LOCATION_PROPERTY);
-    }
-
-    public static String getClusterGcpConfFileLocation() {
-        return properties.getProperty(GCP_CONFIG_FILE_LOCATION_PROPERTY);
-    }
-
-    public static String getPropertyByName(String propertyName) {
-        return properties.getProperty(propertyName);
-    }
-}
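For context, PropertiesResolver.overlapProperty maps a property name such as CONF_FILE_LOCATION to the JVM argument -Dconf.file.location, so a system property can override the value loaded from the bundled properties file. A minimal standalone sketch of that override rule (the class and values below are illustrative only, not part of the removed sources):

    import java.util.Properties;

    public class PropertyOverrideDemo {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("CONF_FILE_LOCATION", "/opt/dlab/conf");
            // Simulates launching the JVM with -Dconf.file.location=/tmp/conf
            System.setProperty("conf.file.location", "/tmp/conf");
            // Same mapping rule as overlapProperty: '_' -> '.', lower-cased
            String argName = "CONF_FILE_LOCATION".replace('_', '.').toLowerCase();
            String override = System.getProperty(argName, "");
            if (!override.isEmpty()) {
                props.setProperty("CONF_FILE_LOCATION", override);
            }
            System.out.println(props.getProperty("CONF_FILE_LOCATION")); // prints /tmp/conf
        }
    }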
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/helper/WaitForStatus.java b/integration-tests/src/main/java/com/epam/dlab/automation/helper/WaitForStatus.java
deleted file mode 100644
index 4cb4129..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/helper/WaitForStatus.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.helper;
-
-import com.epam.dlab.automation.http.ContentType;
-import com.epam.dlab.automation.http.HttpRequest;
-import com.epam.dlab.automation.http.HttpStatusCode;
-import com.jayway.restassured.path.json.JsonPath;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import java.time.Duration;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.function.Predicate;
-import java.util.stream.Stream;
-
-public class WaitForStatus {
-
-	private static final Logger LOGGER = LogManager.getLogger(WaitForStatus.class);
-	private static final String EXPLORATORY_PATH = "exploratory";
-
-	private static long getSsnRequestTimeout() {
-		return ConfigPropertyValue.isRunModeLocal() ? 1000 : 10000;
-	}
-
-	private WaitForStatus() {
-	}
-
-	public static boolean selfService(Duration duration) throws InterruptedException {
-		HttpRequest request = new HttpRequest();
-		int actualStatus;
-		long timeout = duration.toMillis();
-		long expiredTime = System.currentTimeMillis() + timeout;
-
-		while ((actualStatus = request.webApiGet(NamingHelper.getSsnURL(), ContentType.TEXT).statusCode()) !=
-				HttpStatusCode.OK) {
-			if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-				break;
-			}
-			Thread.sleep(getSsnRequestTimeout());
-		}
-
-		if (actualStatus != HttpStatusCode.OK) {
-			LOGGER.info("ERROR: Timeout has been expired for SSN available. Timeout was {}", duration);
-			return false;
-		} else {
-			LOGGER.info("Current status code for SSN is {}", actualStatus);
-		}
-
-		return true;
-	}
-
-	public static int uploadKey(String url, String token, int status, Duration duration)
-			throws InterruptedException {
-		LOGGER.info(" Waiting until status code {} with URL {} with token {}", status, url, token);
-		HttpRequest request = new HttpRequest();
-		int actualStatus;
-		long timeout = duration.toMillis();
-		long expiredTime = System.currentTimeMillis() + timeout;
-
-		while ((actualStatus = request.webApiGet(url, token).getStatusCode()) == status) {
-			if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-				break;
-			}
-			Thread.sleep(getSsnRequestTimeout());
-		}
-
-		if (actualStatus == status) {
-			LOGGER.info("ERROR: {}: Timeout has been expired for request.");
-			LOGGER.info("  URL is {}", url);
-			LOGGER.info("  token is {}", token);
-			LOGGER.info("  status is {}", status);
-			LOGGER.info("  timeout is {}", duration);
-		} else {
-			LOGGER.info(" Current status code for {} is {}", url, actualStatus);
-		}
-
-		return actualStatus;
-	}
-
-	public static String notebook(String url, String token, String notebookName, String status, Duration duration)
-			throws InterruptedException {
-		LOGGER.info("Waiting for status {} with URL {} with token {} for notebook {}", status, url, token,
-				notebookName);
-		HttpRequest request = new HttpRequest();
-		String actualStatus;
-		long timeout = duration.toMillis();
-		long expiredTime = System.currentTimeMillis() + timeout;
-
-		do {
-			actualStatus = getNotebookStatus(request.webApiGet(url, token)
-					.getBody()
-					.jsonPath(), notebookName);
-			if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-				break;
-			}
-			Thread.sleep(getSsnRequestTimeout());
-		}
-		while (status.contains(actualStatus));
-
-		if (status.contains(actualStatus)) {
-			LOGGER.info("ERROR: {}: Timeout has been expired for request.", notebookName);
-			LOGGER.info("  {}: URL is {}", notebookName, url);
-			LOGGER.info("  {}: token is {}", notebookName, token);
-			LOGGER.info("  {}: status is {}", notebookName, status);
-			LOGGER.info("  {}: timeout is {}", notebookName, duration);
-		} else {
-			LOGGER.info("{}: Current state for Notebook {} is {}", notebookName, notebookName, actualStatus);
-		}
-
-		return actualStatus;
-	}
-
-	public static String cluster(String url, String token, String notebookName, String computationalName, String
-			status, Duration duration)
-			throws InterruptedException {
-		LOGGER.info("{}: Waiting until status {} with URL {} with token {} for computational {} on notebook {}",
-				notebookName, status, url, token, computationalName, notebookName);
-		HttpRequest request = new HttpRequest();
-		String actualStatus;
-		long timeout = duration.toMillis();
-		long expiredTime = System.currentTimeMillis() + timeout;
-
-		do {
-			actualStatus = getClusterStatus(request.webApiGet(url, token)
-					.getBody()
-					.jsonPath(), notebookName, computationalName);
-			if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-				break;
-			}
-			Thread.sleep(getSsnRequestTimeout());
-		}
-		while (actualStatus.contains(status));
-
-		if (actualStatus.contains(status)) {
-			LOGGER.info("ERROR: Timeout has been expired for request.");
-			LOGGER.info("  URL is {}", url);
-			LOGGER.info("  token is {}", token);
-			LOGGER.info("  status is {}", status);
-			LOGGER.info("  timeout is {}", duration);
-		} else {
-			LOGGER.info("{}: Current state for cluster {} on notebook is {}", notebookName, computationalName,
-					actualStatus);
-		}
-
-		return actualStatus;
-	}
-
-	@SuppressWarnings("unchecked")
-	public static String getClusterStatus(JsonPath json, String notebookName, String computationalName) {
-		return (String) json.getList(EXPLORATORY_PATH)
-				.stream()
-				.filter(exploratoryNamePredicate(notebookName))
-				.flatMap(computationalResourcesStream())
-				.filter(computationalNamePredicate(computationalName))
-				.map(statusFieldExtractor())
-				.findAny()
-				.orElse(StringUtils.EMPTY);
-	}
-
-	private static String getNotebookStatus(JsonPath json, String notebookName) {
-		List<Map<String, String>> notebooks = json.getList(EXPLORATORY_PATH);
-		return notebooks.stream().filter(exploratoryNamePredicate(notebookName))
-				.map(e -> e.get("status"))
-				.findAny()
-				.orElse(StringUtils.EMPTY);
-	}
-
-	private static Function<Object, Object> statusFieldExtractor() {
-		return cr -> (((HashMap) cr).get("status"));
-	}
-
-	private static Predicate<Object> computationalNamePredicate(String computationalName) {
-		return cr -> computationalName.equals(((HashMap) cr).get("computational_name"));
-	}
-
-	private static Function<Object, Stream<?>> computationalResourcesStream() {
-		return d -> ((List) ((HashMap) d).get("computational_resources")).stream();
-	}
-
-	private static Predicate<Object> exploratoryNamePredicate(String notebookName) {
-		return d -> notebookName.equals(((HashMap) d).get("exploratory_name"));
-	}
-}
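The polling helpers above all follow the same pattern: compute a deadline from the supplied Duration, poll the self-service REST API at a fixed interval, and return the last observed status once the target state is reached or the deadline passes. A usage sketch (the URL, token, and status strings are placeholders, not values from the removed tests):

    // Wait up to 30 minutes for a notebook to leave its transitional states.
    String state = WaitForStatus.notebook(
            "http://ssn.example.com/api/infrastructure/info", // placeholder URL
            token,                                            // placeholder auth token
            "my-notebook",
            "creating configuring starting",                  // transitional states to wait through
            Duration.ofMinutes(30));
    if (!"running".equals(state)) {
        throw new IllegalStateException("Notebook did not reach running state: " + state);
    }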
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/http/ApiPath.java b/integration-tests/src/main/java/com/epam/dlab/automation/http/ApiPath.java
deleted file mode 100644
index c3dc9f1..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/http/ApiPath.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.http;
-
-public class ApiPath {
-
-    public static final String LOGIN = "/api/user/login";
-    public static final String LOGOUT = "/api/user/logout";
-    public static final String UPLOAD_KEY = "/api/user/access_key"; 
-    public static final String AUTHORIZE_USER = "/api/user/authorize";
-    public static final String EXP_ENVIRONMENT = "/api/infrastructure_provision/exploratory_environment";
-    public static final String PROVISIONED_RES = "/api/infrastructure/info";
-    public static final String COMPUTATIONAL_RES = "/api/infrastructure_provision/computational_resources/dataengine-service";
-    public static final String COMPUTATIONAL_RES_SPARK = "/api/infrastructure_provision/computational_resources/dataengine";
-    private static final String STOP_NOTEBOOK = EXP_ENVIRONMENT + "/%s/stop";
-    private static final String TERMINATE_CLUSTER =
-			"/api/infrastructure_provision/computational_resources/%s/%s/terminate";
-	private static final String START_CLUSTER = "/api/infrastructure_provision/computational_resources/%s/%s/start";
-	private static final String STOP_CLUSTER = "/api/infrastructure_provision/computational_resources/%s/%s/stop";
-    private static final String TERMINATE_NOTEBOOK = EXP_ENVIRONMENT + "/%s/terminate";
-    public static final String LIB_GROUPS = "/api/infrastructure_provision/exploratory_environment/lib_groups";
-    public static final String LIB_LIST = "/api/infrastructure_provision/exploratory_environment/search/lib_list";
-    public static final String LIB_INSTALL = "/api/infrastructure_provision/exploratory_environment/lib_install";
-    public static final String LIB_LIST_EXPLORATORY_FORMATTED = "/api/infrastructure_provision/exploratory_environment/lib_list/formatted";
-    public static final String IMAGE_CREATION = "/api/infrastructure_provision/exploratory_environment/image";
-
-    private ApiPath(){}
-
-
-    private static String configureURL(String url, Object... args) {
-        return String.format(url, args);        
-    }
-    
-    public static String getStopNotebookUrl(String notebookName) {
-        return configureURL(STOP_NOTEBOOK, notebookName);
-    }
-    
-    public static String getTerminateClusterUrl(String notebookName, String desName) {
-        return configureURL(TERMINATE_CLUSTER, notebookName, desName);
-    }
-    
-    public static String getTerminateNotebookUrl(String notebookName) {
-        return configureURL(TERMINATE_NOTEBOOK, notebookName);
-    }
-
-	public static String getStartClusterUrl(String notebookName, String desName) {
-		return configureURL(START_CLUSTER, notebookName, desName);
-	}
-
-	public static String getStopClusterUrl(String notebookName, String desName) {
-		return configureURL(STOP_CLUSTER, notebookName, desName);
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/http/ContentType.java b/integration-tests/src/main/java/com/epam/dlab/automation/http/ContentType.java
deleted file mode 100644
index e2a482c..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/http/ContentType.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.http;
-
-public class ContentType{
-    public static final String FORMDATA = "multipart/form-data";
-    public static final String JSON = "application/json";
-    public static final String ANY = "*/*";
-    public static final String TEXT = "text/html";
-
-	private ContentType() {
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/http/HttpRequest.java b/integration-tests/src/main/java/com/epam/dlab/automation/http/HttpRequest.java
deleted file mode 100644
index 2ef936f..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/http/HttpRequest.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.http;
-
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.jayway.restassured.http.ContentType;
-import com.jayway.restassured.response.Response;
-
-import java.io.File;
-import java.util.Map;
-
-import static com.jayway.restassured.RestAssured.given;
-
-public class HttpRequest {
-
-	private static final String AUTHORIZATION = "Authorization";
-	private static final String BEARER = "Bearer ";
-
-	private void addHeader(String headerType, String headerValue) {
-		given().header(headerType, headerValue);
-	}
-
-	public void addAuthorizationBearer(String token) {
-		this.addHeader(AUTHORIZATION, BEARER + token);
-	}
-
-	public Response webApiGet(String url) {
-		return given().contentType(ContentType.JSON).when().get(url);
-	}
-
-	public Response webApiGet(String url, String token) {
-		return given().header(AUTHORIZATION, BEARER + token).contentType(ContentType.JSON).when().get(url);
-	}
-	
-	public Response webApiGet(String url, String token, Map<String, ?> params) {
-		return given().header(AUTHORIZATION, BEARER + token).contentType(ContentType.JSON).params(params).when()
-				.get(url);
-	}
-
-	public Response webApiPost(String url, String contentType, Object body) {
-		return given().contentType(contentType).body(body).when().post(url);
-	}
-
-	public Response webApiPost(String url, String contentType) {
-		return given().contentType(contentType).when().post(url);
-	}
-
-	public Response webApiPost(String url, String contentType, String token) {
-		return given()
-				.contentType(contentType)
-				.header(AUTHORIZATION, BEARER + token)
-				.multiPart(new File(ConfigPropertyValue.getAccessKeyPubFileName()))
-				.formParam(ConfigPropertyValue.getAccessKeyPubFileName())
-				.when()
-				.post(url);
-	}
-
-	public Response webApiPost(String url, String contentType, Object body, String token) {
-		return given().contentType(contentType).header(AUTHORIZATION, BEARER + token).body(body).when().post(url);
-	}
-
-	public Response webApiPut(String url, String contentType, Object body, String token) {
-		return given().contentType(contentType).header(AUTHORIZATION, BEARER + token).body(body).when().put(url);
-	}
-
-	public Response webApiPut(String url, String contentType, String token) {
-		return given().contentType(contentType).header(AUTHORIZATION, BEARER + token).when().put(url);
-	}
-
-	public Response webApiDelete(String url, String contentType, String token) {
-		return given().contentType(contentType).header(AUTHORIZATION, BEARER + token).when().delete(url);
-	}
-}
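HttpRequest is a thin wrapper around RestAssured's given() chain that attaches the Bearer token to each call. A typical authenticated GET plus status check, sketched with placeholder values:

    HttpRequest request = new HttpRequest();
    Response response = request.webApiGet("http://ssn.example.com/api/infrastructure/info", token);
    if (response.getStatusCode() != HttpStatusCode.OK) {
        throw new IllegalStateException("Unexpected status code: " + response.getStatusCode());
    }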
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/http/HttpStatusCode.java b/integration-tests/src/main/java/com/epam/dlab/automation/http/HttpStatusCode.java
deleted file mode 100644
index 6c4aef0..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/http/HttpStatusCode.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.http;
-
-public class HttpStatusCode {
-    
-    public static final int OK = 200;
-    public static final int UNAUTHORIZED = 401;
-    public static final int ACCEPTED = 202;
-    public static final int NOT_FOUND = 404;
-
-	private HttpStatusCode() {
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsConfigProperties.java b/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsConfigProperties.java
deleted file mode 100644
index 158715b..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsConfigProperties.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.jenkins;
-
-public class JenkinsConfigProperties {
-
-    static final long JENKINS_REQUEST_TIMEOUT = 5000;
-
-	static final String AUTHORIZATION = "Authorization";
-	static final String AUTHORIZATION_KEY = "Basic %s"; // the placeholder is filled with base64-encoded user:password
-
-	static final String SUCCESS_STATUS = "true";
-	static final String JENKINS_JOB_NAME_SEARCH = "/";
-
-	static final String jenkinsJobStartBody = "name=Access_Key_ID&value=%s" +
-            "&name=Secret_Access_Key&value=%s" +
-            "&name=Infrastructure_Tag&value=%s" +
-            "&name=OS_user&value=%s&name=Cloud_provider&value=aws&name=OS_family&value=%s&name=Action&value=create" +
-            "&json=%7B%22parameter" +
-            "%22%3A+%5B%7B%22name%22%3A+%22Access_Key_ID%22%2C+%22value%22%3A+%22%s" +
-            "%22%7D%2C+%7B%22name%22%3A+%22Secret_Access_Key%22%2C+%22value%22%3A+%22%s" +
-            "%22%7D%2C+%7B%22name%22%3A+%22Infrastructure_Tag%22%2C+%22value%22%3A+%22%s" +
-            "%22%7D%2C+%7B%22name%22%3A+%22OS_user%22%2C+%22value%22%3A+%22%s" +
-            "%22%7D%2C+%7B%22name%22%3A+%22Cloud_provider%22%2C+%22value%22%3A+%22aws" +
-            "%22%7D%2C+%7B%22name%22%3A+%22OS_family%22%2C+%22value%22%3A+%22%s" +
-            "%22%7D%2C+%7B%22name%22%3A+%22Action%22%2C+%22value%22%3A+%22create" +
-            "%22%7D%5D%7D&Submit=Build";
-
-	private JenkinsConfigProperties() {
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsResponseElements.java b/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsResponseElements.java
deleted file mode 100644
index 0aca9f6..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsResponseElements.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.jenkins;
-
-public class JenkinsResponseElements {
-	public static final String IN_QUEUE_ELEMENT = "freeStyleProject.inQueue";
-	public static final String HTML_TITLE = "html.head.title";
-	public static final String RESULT = "result";
-
-	private JenkinsResponseElements() {
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsService.java b/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsService.java
deleted file mode 100644
index 5d2a995..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsService.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.jenkins;
-
-import com.epam.dlab.automation.exceptions.JenkinsException;
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.epam.dlab.automation.helper.NamingHelper;
-import com.epam.dlab.automation.http.HttpStatusCode;
-import com.jayway.restassured.RestAssured;
-import com.jayway.restassured.authentication.FormAuthConfig;
-import com.jayway.restassured.http.ContentType;
-import com.jayway.restassured.response.Response;
-import com.jayway.restassured.specification.RequestSpecification;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.time.Duration;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import static com.jayway.restassured.RestAssured.given;
-
-public class JenkinsService {
-	private static final Logger LOGGER = LogManager.getLogger(JenkinsService.class);
-
-    private final String awsAccessKeyId;
-    private final String awsSecretAccessKey;
-    
-    private String ssnURL;
-    private String serviceBaseName;
-
-	private FormAuthConfig config = new FormAuthConfig(JenkinsConfigProperties.JENKINS_JOB_NAME_SEARCH, "username",
-			"password");
-    
-    public JenkinsService(){
-    	if (!ConfigPropertyValue.isUseJenkins()) {
-    		ssnURL = ConfigPropertyValue.getSsnUrl();
-    		serviceBaseName = ConfigPropertyValue.getServiceBaseName();
-    	}
-        awsAccessKeyId = convertToParam(ConfigPropertyValue.getAwsAccessKeyId());
-        awsSecretAccessKey = convertToParam(ConfigPropertyValue.getAwsSecretAccessKey());
-    }
-    
-    private String convertToParam(String s) {
-    	return s.replace("/", "%2F");
-    }
-    
-    public String getSsnURL() {
-        return ssnURL;
-    }
-
-    public String getServiceBaseName() {
-        return serviceBaseName;
-    }
-    
-    private String getQueueStatus() {
-    	return getWhen(ContentType.XML)
-                .get(JenkinsUrls.API).getBody()
-                .xmlPath()
-                .getString(JenkinsResponseElements.IN_QUEUE_ELEMENT);
-    }
-
-	private void waitForJenkinsStartup(Duration duration) throws InterruptedException {
-    	String actualStatus;
-    	long timeout = duration.toMillis();
-        long expiredTime = System.currentTimeMillis() + timeout;
-        
-    	while ((actualStatus = getQueueStatus()).endsWith(JenkinsConfigProperties.SUCCESS_STATUS)) {
-            Thread.sleep(JenkinsConfigProperties.JENKINS_REQUEST_TIMEOUT);
-            if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-            	actualStatus = getQueueStatus();
-            	break;
-            }
-        }
-        
-        if (actualStatus.endsWith(JenkinsConfigProperties.SUCCESS_STATUS)) {
-            LOGGER.info("ERROR: Timeout has been expired for Jenkins");
-            LOGGER.info("  timeout is {}");
-        }
-    }
-
-	public String runJenkinsJob(String jenkinsJobURL) throws InterruptedException {
-    	if (!ConfigPropertyValue.isUseJenkins()) {
-    		return ConfigPropertyValue.getJenkinsBuildNumber();
-    	}
-
-		baseUriInitialize(jenkinsJobURL);
-        String dateAsString = NamingHelper.generateRandomValue();
-        Response responsePostJob = getWhen(ContentType.URLENC)
-				.body(String.format(JenkinsConfigProperties.jenkinsJobStartBody,
-                        awsAccessKeyId, awsSecretAccessKey, dateAsString,
-                        ConfigPropertyValue.getClusterOsUser(), ConfigPropertyValue.getClusterOsFamily(),
-                        awsAccessKeyId, awsSecretAccessKey, dateAsString,
-                        ConfigPropertyValue.getClusterOsUser(), ConfigPropertyValue.getClusterOsFamily()))
-        		.post(jenkinsJobURL + "build");
-        Assert.assertEquals(responsePostJob.statusCode(), HttpStatusCode.OK);
-        
-        waitForJenkinsStartup(ConfigPropertyValue.getTimeoutJenkinsAutotest());
-        
-        setBuildNumber();
-        checkBuildResult();
-        setJenkinsURLServiceBaseName();
-        
-        return ConfigPropertyValue.getJenkinsBuildNumber();
-    }
-
-	public String getJenkinsJob() throws InterruptedException {
-    	if (!ConfigPropertyValue.isUseJenkins()) {
-    		return ConfigPropertyValue.getJenkinsBuildNumber();
-    	}
-
-		baseUriInitialize(ConfigPropertyValue.getJenkinsJobURL());
-
-        setBuildNumber();
-        checkBuildResult();
-        setJenkinsURLServiceBaseName();
-
-        return ConfigPropertyValue.getJenkinsBuildNumber();
-    }
-
-	private static void baseUriInitialize(String value) {
-		RestAssured.baseURI = value;
-	}
-
-	private void setBuildNumber() {
-        if (ConfigPropertyValue.getJenkinsBuildNumber() != null) {
-            LOGGER.info("Jenkins build number is {}", ConfigPropertyValue.getJenkinsBuildNumber());
-        	return;
-    	}
-
-        String buildName = getWhen(ContentType.URLENC)
-                .get(JenkinsUrls.LAST_BUILD).getBody().htmlPath().getString(JenkinsResponseElements.HTML_TITLE);
-        
-        Pattern pattern = Pattern.compile("\\s#\\d+(?!\\d+)\\s");      
-        Matcher matcher = pattern.matcher(buildName);
-        if (matcher.find()) {
-        	ConfigPropertyValue.setJenkinsBuildNumber(matcher.group().substring(2).trim());
-        } else {
-			throw new JenkinsException("Jenkins job failed: build number not found");
-        }
-        LOGGER.info("Jenkins build number is {}", ConfigPropertyValue.getJenkinsBuildNumber());
-    }
-
-
-	private void checkBuildResult() throws InterruptedException {
-    	String buildResult;
-    	long timeout = ConfigPropertyValue.getTimeoutJenkinsAutotest().toMillis();
-    	long expiredTime = System.currentTimeMillis() + timeout;
-        
-        do {
-        	buildResult = getWhen(ContentType.JSON)
-        			.get(ConfigPropertyValue.getJenkinsBuildNumber() + JenkinsUrls.JSON_PRETTY)
-        			.getBody()
-                    .jsonPath()
-                    .getString(JenkinsResponseElements.RESULT);
-            if (buildResult == null) {
-            	if (timeout != 0 && expiredTime < System.currentTimeMillis()) {
-					throw new JenkinsException("Timeout has been expired for Jenkins build. Timeout is " +
-							ConfigPropertyValue.getTimeoutJenkinsAutotest());
-            	}
-            	Thread.sleep(JenkinsConfigProperties.JENKINS_REQUEST_TIMEOUT);
-            }
-        } while (buildResult == null);
-        
-        if (!"SUCCESS".equals(buildResult)) {
-			throw new JenkinsException("Jenkins job failed: build result is not SUCCESS");
-        }
-    }
-
-	private void setJenkinsURLServiceBaseName() {
-        String jenkinsLog = getWhen(ContentType.TEXT)
-        		.get(ConfigPropertyValue.getJenkinsBuildNumber() + JenkinsUrls.LOG_TEXT)
-        		.getBody()
-                .prettyPrint();
-        Pattern pattern = Pattern.compile("Jenkins URL:(.+)");
-        Matcher matcher = pattern.matcher(jenkinsLog);
-        if (matcher.find()) {
-        	ssnURL = matcher.group(1).replaceAll("/jenkins", "");
-        }
-
-        pattern = Pattern.compile("Service base name:(.+)");
-        matcher = pattern.matcher(jenkinsLog);
-        if (matcher.find()) {
-        	serviceBaseName = matcher.group(1);
-        } else {
-			throw new JenkinsException("Service base name not found in Jenkins job output");
-        }
-    }
-
-    private RequestSpecification getWhen(ContentType contentType) {
-        return given()
-                .header(JenkinsConfigProperties.AUTHORIZATION,
-						String.format(JenkinsConfigProperties.AUTHORIZATION_KEY,
-								base64CredentialEncode(ConfigPropertyValue.get(ConfigPropertyValue.JENKINS_USERNAME),
-										ConfigPropertyValue.get(ConfigPropertyValue.JENKINS_PASS))))
-        		.auth()
-                .form(ConfigPropertyValue.getJenkinsUsername(), ConfigPropertyValue.getJenkinsPassword(), config)
-        		.contentType(contentType).when();
-    }
-
-    private static String base64CredentialEncode(String user, String password) {
-        byte[] bytesEncoded = Base64.encodeBase64(String.format("%s:%s", user, password).getBytes());
-        return new String(bytesEncoded);
-    }
-}
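The Authorization header assembled in getWhen follows the standard HTTP Basic scheme: the user:password pair is base64-encoded and substituted into "Basic %s". Sketched in isolation with commons-codec (the credentials here are placeholders):

    import org.apache.commons.codec.binary.Base64;

    String credentials = new String(Base64.encodeBase64("user:password".getBytes()));
    String header = String.format("Basic %s", credentials); // "Basic dXNlcjpwYXNzd29yZA=="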
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsUrls.java b/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsUrls.java
deleted file mode 100644
index 04bedcc..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/jenkins/JenkinsUrls.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.jenkins;
-
-public class JenkinsUrls {
-	public static final String API = "api/xml";
-	public static final String LAST_BUILD = "lastBuild";
-	public static final String JSON_PRETTY = "/api/json?pretty=true";
-	public static final String LOG_TEXT = "/logText/progressiveText?start=0";
-
-	private JenkinsUrls() {
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/CreateNotebookDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/CreateNotebookDto.java
deleted file mode 100644
index 71bfedf..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/CreateNotebookDto.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class CreateNotebookDto {
-    
-	private String image;
-    private String name;
-    @JsonProperty("template_name")
-    private String templateName;
-    private String shape;
-    private String version;
-	@JsonProperty("notebook_image_name")
-	private String imageName;
-
-	public String getImageName() {
-		return imageName;
-	}
-
-	public void setImageName(String imageName) {
-		this.imageName = imageName;
-	}
-
-	public String getImage() {
-		return image;
-	}
-
-	public void setImage(String image) {
-		this.image = image;
-	}
-
-	public String getName() {
-        return name;
-    }
-    
-    public void setName(String name) {
-        this.name = name;
-    }
-    
-    public String getShape() {
-        return shape;
-    }
-    
-    public void setShape(String shape) {
-        this.shape = shape;
-    }
-    
-	public String getTemplateName() {
-		return templateName;
-	}
-
-	public void setTemplateName(String templateName) {
-		this.templateName = templateName;
-	}
-
-    public String getVersion() {
-        return version;
-    }
-    
-    public void setVersion(String version) {
-        this.version = version;
-    }
-    
-    public CreateNotebookDto(){
-		//This empty constructor is required for proper serialization/deserialization
-    }
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployClusterDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployClusterDto.java
deleted file mode 100644
index 695a5eb..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployClusterDto.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public abstract class DeployClusterDto {
-
-	private String image;
-
-	@JsonProperty("template_name")
-	private String templateName;
-	private String name;
-
-	@JsonProperty("notebook_name")
-	private String notebookName;
-
-	public String getImage() {
-		return image;
-	}
-
-	public void setImage(String image) {
-		this.image = image;
-	}
-
-	public String getTemplateName() {
-		return templateName;
-	}
-
-	public void setTemplateName(String templateName) {
-		this.templateName = templateName;
-	}
-
-	public String getName() {
-		return name;
-	}
-
-	public void setName(String name) {
-		this.name = name;
-	}
-
-	public String getNotebookName() {
-		return notebookName;
-	}
-
-	public void setNotebookName(String notebookName) {
-		this.notebookName = notebookName;
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployDataProcDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployDataProcDto.java
deleted file mode 100644
index b3b64e2..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployDataProcDto.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
-public class DeployDataProcDto extends DeployClusterDto {
-
-	@JsonProperty("dataproc_master_count")
-	private String dataprocMasterCount;
-
-	@JsonProperty("dataproc_slave_count")
-	private String dataprocSlaveCount;
-
-	@JsonProperty("dataproc_preemptible_count")
-	private String dataprocPreemptibleCount;
-
-	@JsonProperty("dataproc_master_instance_type")
-	private String dataprocMasterInstanceType;
-
-	@JsonProperty("dataproc_slave_instance_type")
-	private String dataprocSlaveInstanceType;
-
-	@JsonProperty("dataproc_version")
-	private String dataprocVersion;
-
-
-	public String getDataprocMasterCount() {
-		return dataprocMasterCount;
-	}
-
-	public void setDataprocMasterCount(String dataprocMasterCount) {
-		this.dataprocMasterCount = dataprocMasterCount;
-	}
-
-	public String getDataprocSlaveCount() {
-		return dataprocSlaveCount;
-	}
-
-	public void setDataprocSlaveCount(String dataprocSlaveCount) {
-		this.dataprocSlaveCount = dataprocSlaveCount;
-	}
-
-	public String getDataprocPreemptibleCount() {
-		return dataprocPreemptibleCount;
-	}
-
-	public void setDataprocPreemptibleCount(String dataprocPreemptibleCount) {
-		this.dataprocPreemptibleCount = dataprocPreemptibleCount;
-	}
-
-	public String getDataprocMasterInstanceType() {
-		return dataprocMasterInstanceType;
-	}
-
-	public void setDataprocMasterInstanceType(String dataprocMasterInstanceType) {
-		this.dataprocMasterInstanceType = dataprocMasterInstanceType;
-	}
-
-	public String getDataprocSlaveInstanceType() {
-		return dataprocSlaveInstanceType;
-	}
-
-	public void setDataprocSlaveInstanceType(String dataprocSlaveInstanceType) {
-		this.dataprocSlaveInstanceType = dataprocSlaveInstanceType;
-	}
-
-	public String getDataprocVersion() {
-		return dataprocVersion;
-	}
-
-	public void setDataprocVersion(String dataprocVersion) {
-		this.dataprocVersion = dataprocVersion;
-	}
-
-	@Override
-	public String toString() {
-		return MoreObjects.toStringHelper(this)
-				.add("image", getImage())
-				.add("template_name", getTemplateName())
-				.add("name", getName())
-				.add("notebook_name", getNotebookName())
-				.add("dataproc_master_count", dataprocMasterCount)
-				.add("dataproc_slave_count", dataprocSlaveCount)
-				.add("dataproc_preemptible_count", dataprocPreemptibleCount)
-				.add("dataproc_master_instance_type", dataprocMasterInstanceType)
-				.add("dataproc_slave_instance_type", dataprocSlaveInstanceType)
-				.add("dataproc_version", dataprocVersion)
-				.toString();
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployEMRDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployEMRDto.java
deleted file mode 100644
index 8f3ac1e..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeployEMRDto.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
-public class DeployEMRDto extends DeployClusterDto{
-
-	@JsonProperty("emr_instance_count")
-	private String emrInstanceCount;
-
-	@JsonProperty("emr_master_instance_type")
-	private String emrMasterInstanceType;
-
-	@JsonProperty("emr_slave_instance_type")
-	private String emrSlaveInstanceType;
-
-	@JsonProperty("emr_slave_instance_spot")
-	private boolean emrSlaveInstanceSpot = false;
-
-	@JsonProperty("emr_slave_instance_spot_pct_price")
-	private Integer emrSlaveInstanceSpotPctPrice = 0;
-
-	@JsonProperty("emr_version")
-	private String emrVersion;
-
-
-	public String getEmrInstanceCount() {
-		return emrInstanceCount;
-	}
-
-	public void setEmrInstanceCount(String emrInstanceCount) {
-		this.emrInstanceCount = emrInstanceCount;
-	}
-
-	public String getEmrMasterInstanceType() {
-		return emrMasterInstanceType;
-	}
-
-	public void setEmrMasterInstanceType(String emrMasterInstanceType) {
-		this.emrMasterInstanceType = emrMasterInstanceType;
-	}
-
-	public String getEmrSlaveInstanceType() {
-		return emrSlaveInstanceType;
-	}
-
-	public void setEmrSlaveInstanceType(String emrSlaveInstanceType) {
-		this.emrSlaveInstanceType = emrSlaveInstanceType;
-	}
-
-	public boolean isEmrSlaveInstanceSpot() {
-		return emrSlaveInstanceSpot;
-	}
-
-	public void setEmrSlaveInstanceSpot(boolean emrSlaveInstanceSpot) {
-		this.emrSlaveInstanceSpot = emrSlaveInstanceSpot;
-	}
-
-	public Integer getEmrSlaveInstanceSpotPctPrice() {
-		return emrSlaveInstanceSpotPctPrice;
-	}
-
-	public void setEmrSlaveInstanceSpotPctPrice(Integer emrSlaveInstanceSpotPctPrice) {
-		this.emrSlaveInstanceSpotPctPrice = emrSlaveInstanceSpotPctPrice;
-	}
-
-	public String getEmrVersion() {
-		return emrVersion;
-	}
-
-	public void setEmrVersion(String emrVersion) {
-		this.emrVersion = emrVersion;
-	}
-
-	@Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this)
-        		.add("image", getImage())
-				.add("template_name", getTemplateName())
-        		.add("name", getName())
-				.add("notebook_name", getNotebookName())
-				.add("emr_instance_count", emrInstanceCount)
-				.add("emr_master_instance_type", emrMasterInstanceType)
-				.add("emr_slave_instance_type", emrSlaveInstanceType)
-				.add("emr_slave_instance_spot", emrSlaveInstanceSpot)
-				.add("emr_slave_instance_spot_pct_price", emrSlaveInstanceSpotPctPrice)
-				.add("emr_version", emrVersion)
-        		.toString();
-    }
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeploySparkDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/DeploySparkDto.java
deleted file mode 100644
index d1b4734..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/DeploySparkDto.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
-public class DeploySparkDto extends DeployClusterDto{
-
-	@JsonProperty("dataengine_instance_count")
-	private String dataengineInstanceCount;
-
-	@JsonProperty("dataengine_instance_shape")
-	private String dataengineInstanceShape;
-
-
-	public String getDataengineInstanceCount() {
-		return dataengineInstanceCount;
-	}
-
-	public void setDataengineInstanceCount(String dataengineInstanceCount) {
-		this.dataengineInstanceCount = dataengineInstanceCount;
-	}
-
-	public String getDataengineInstanceShape() {
-		return dataengineInstanceShape;
-	}
-
-	public void setDataengineInstanceShape(String dataengineInstanceShape) {
-		this.dataengineInstanceShape = dataengineInstanceShape;
-	}
-
-	@Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this)
-        		.add("image", getImage())
-				.add("template_name", getTemplateName())
-        		.add("name", getName())
-				.add("notebook_name", getNotebookName())
-				.add("dataengine_instance_shape", dataengineInstanceShape)
-				.add("dataengine_instance_count", dataengineInstanceCount)
-        		.toString();
-    }
-	
-	
-
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/ExploratoryImageDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/ExploratoryImageDto.java
deleted file mode 100644
index 84950df..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/ExploratoryImageDto.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class ExploratoryImageDto {
-
-	@JsonProperty("exploratory_name")
-	private String notebookName;
-	private String name;
-	private String description;
-
-	public ExploratoryImageDto() {
-	}
-
-	public ExploratoryImageDto(String notebookName, String name, String description) {
-		this.notebookName = notebookName;
-		this.name = name;
-		this.description = description;
-	}
-
-	public String getNotebookName() {
-		return notebookName;
-	}
-
-	public void setNotebookName(String notebookName) {
-		this.notebookName = notebookName;
-	}
-
-	public String getName() {
-		return name;
-	}
-
-	public void setName(String name) {
-		this.name = name;
-	}
-
-	public String getDescription() {
-		return description;
-	}
-
-	public void setDescription(String description) {
-		this.description = description;
-	}
-
-	@Override
-	public String toString() {
-		return "ExploratoryImageDto{" +
-				"notebookName='" + notebookName + '\'' +
-				", name='" + name + '\'' +
-				", description='" + description + '\'' +
-				'}';
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/ImageDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/ImageDto.java
deleted file mode 100644
index 0a5dc0b..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/ImageDto.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-public class ImageDto {
-
-	private String name;
-	private String description;
-	private String application;
-	private String fullName;
-	private String status;
-
-	public ImageDto() {
-	}
-
-	public ImageDto(String name, String description, String application, String fullName, String status) {
-
-		this.name = name;
-		this.description = description;
-		this.application = application;
-		this.fullName = fullName;
-		this.status = status;
-	}
-
-	public String getName() {
-		return name;
-	}
-
-	public void setName(String name) {
-		this.name = name;
-	}
-
-	public String getDescription() {
-		return description;
-	}
-
-	public void setDescription(String description) {
-		this.description = description;
-	}
-
-	public String getApplication() {
-		return application;
-	}
-
-	public void setApplication(String application) {
-		this.application = application;
-	}
-
-	public String getFullName() {
-		return fullName;
-	}
-
-	public void setFullName(String fullName) {
-		this.fullName = fullName;
-	}
-
-	public String getStatus() {
-		return status;
-	}
-
-	public void setStatus(String status) {
-		this.status = status;
-	}
-
-	@Override
-	public String toString() {
-		return "ImageDto{" +
-				"name='" + name + '\'' +
-				", description='" + description + '\'' +
-				", application='" + application + '\'' +
-				", fullName='" + fullName + '\'' +
-				", status='" + status + '\'' +
-				'}';
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/JsonMapperDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/JsonMapperDto.java
deleted file mode 100644
index 11cdd50..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/JsonMapperDto.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.CollectionType;
-import com.fasterxml.jackson.databind.type.TypeFactory;
-import com.google.gson.JsonParseException;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.util.List;
-
-public class JsonMapperDto {
-
-    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
-	private JsonMapperDto() {
-	}
-
-    public static <T> T readNode(String pathToJson, Class<T> clazz) throws IOException {
-        try (FileInputStream in = new FileInputStream(pathToJson)) {
-            return OBJECT_MAPPER.readerFor(clazz).readValue(in);
-        }
-    }
-
-    public static <T> List<T> readListOf(String pathToJson, Class<T> clazz) {
-        try (FileInputStream in = new FileInputStream(pathToJson)) {
-            CollectionType typeReference = TypeFactory.defaultInstance().constructCollectionType(List.class, clazz);
-            return OBJECT_MAPPER.readValue(in, typeReference);
-        } catch (IOException e) {
-            throw new JsonParseException("Cannot read json file", e);
-        }
-    }
-
-    public static <T> T readObject(String pathToJson, Class<T> clazz) {
-        try (FileInputStream in = new FileInputStream(pathToJson)) {
-            return OBJECT_MAPPER.readValue(in, clazz);
-        } catch (IOException e) {
-            throw new JsonParseException("Cannot read json file", e);
-        }
-    }
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/Lib.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/Lib.java
deleted file mode 100644
index a804c18..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/Lib.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-import lombok.EqualsAndHashCode;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-@EqualsAndHashCode
-public class Lib {
-	@JsonProperty
-	private String group;
-	@JsonProperty
-	private String name;
-	@JsonProperty
-	private String version;
-
-	public Lib() {
-	}
-
-	public Lib(String group, String name, String version) {
-		this.group = group;
-		this.name = name;
-		this.version = version;
-	}
-
-	public String getGroup() {
-		return group;
-	}
-
-	public String getName() {
-		return name;
-	}
-
-	public String getVersion() {
-		return version;
-	}
-
-	@Override
-	public String toString() {
-		return MoreObjects.toStringHelper(this)
-				.add("group", group)
-				.add("name", name)
-				.add("version", version)
-				.toString();
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/LoginDto.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/LoginDto.java
deleted file mode 100644
index 4018643..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/LoginDto.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class LoginDto {
-
-    private String username;
-    private String password;
-
-    @JsonProperty("access_token")
-    private String accessToken;
-
-    public LoginDto() {
-    }
-
-    public LoginDto(String username, String password) {
-        this.username = username;
-        this.password = password;
-        this.accessToken = "";
-    }
-
-    public String getUsername() {
-        return username;
-    }
-
-    public void setUsername(String username) {
-        this.username = username;
-    }
-
-    public String getPassword() {
-        return password;
-    }
-
-    public void setPassword(String password) {
-        this.password = password;
-    }
-
-    public String getAccessToken() {
-        return accessToken;
-    }
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/model/NotebookConfig.java b/integration-tests/src/main/java/com/epam/dlab/automation/model/NotebookConfig.java
deleted file mode 100644
index bac7893..0000000
--- a/integration-tests/src/main/java/com/epam/dlab/automation/model/NotebookConfig.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.model;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.List;
-
-public class NotebookConfig {
-
-    @JsonProperty("notebook_template")
-    private String notebookTemplate;
-
-    @JsonProperty("data_engine_type")
-    private String dataEngineType;
-
-    @JsonProperty("full_test")
-    private boolean fullTest;
-
-
-    @JsonProperty("timeout_notebook_create")
-    private String timeoutNotebookCreate = "60m";
-
-    @JsonProperty("timeout_notebook_startup")
-    private String timeoutNotebookStartup = "20m";
-
-    @JsonProperty("timeout_notebook_shutdown")
-    private String timeoutNotebookShutdown = "20m";
-
-
-    @JsonProperty("timeout_cluster_create")
-    private String timeoutClusterCreate = "60m";
-
-	@JsonProperty("timeout_cluster_startup")
-	private String timeoutClusterStartup = "20m";
-
-	@JsonProperty("timeout_cluster_stop")
-	private String timeoutClusterStop = "20m";
-
-    @JsonProperty("timeout_cluster_terminate")
-    private String timeoutClusterTerminate = "20m";
-
-
-    @JsonProperty("timeout_lib_groups")
-    private String timeoutLibGroups = "5m";
-
-    @JsonProperty("timeout_lib_list")
-    private String timeoutLibList = "5m";
-
-    @JsonProperty("timeout_lib_install")
-    private String timeoutLibInstall = "15m";
-
-	@JsonProperty("timeout_image_create")
-	private String timeoutImageCreate = "60m";
-
-	@JsonProperty("image_test_required")
-	private boolean imageTestRequired = false;
-
-	@JsonProperty("skipped_libraries")
-	private List<Lib> skippedLibraries;
-
-	@JsonProperty("notebook_shape")
-	private String notebookShape = StringUtils.EMPTY;
-
-	@JsonProperty("des_version")
-	private String desVersion = StringUtils.EMPTY;
-
-	@JsonProperty("des_spot_required")
-	private boolean desSpotRequired = false;
-
-	@JsonProperty("des_spot_price")
-	private int desSpotPrice = 0;
-
-	public List<Lib> getSkippedLibraries() {
-		return skippedLibraries;
-	}
-
-	public String getTimeoutNotebookCreate() {
-    	return timeoutNotebookCreate;
-    }
-
-	public String getNotebookShape() {
-		return notebookShape;
-	}
-
-	public String getDesVersion() {
-		return desVersion;
-	}
-
-	public boolean isDesSpotRequired() {
-		return desSpotRequired;
-	}
-
-	public int getDesSpotPrice() {
-		return desSpotPrice;
-	}
-
-	public String getTimeoutNotebookStartup() {
-    	return timeoutNotebookStartup;
-    }
-
-    public String getTimeoutNotebookShutdown() {
-    	return timeoutNotebookShutdown;
-    }
-
-    public String getTimeoutClusterCreate() {
-    	return timeoutClusterCreate;
-    }
-
-    public String getTimeoutClusterTerminate() {
-    	return timeoutClusterTerminate;
-    }
-
-    public String getTimeoutLibGroups() {
-    	return timeoutLibGroups;
-    }
-
-    public String getTimeoutLibList() {
-    	return timeoutLibList;
-    }
-
-    public String getTimeoutLibInstall() {
-    	return timeoutLibInstall;
-    }
-
-	public String getTimeoutImageCreate() {
-		return timeoutImageCreate;
-	}
-
-    public String getNotebookTemplate() {
-    	return notebookTemplate;
-    }
-
-
-    public String getDataEngineType() {
-    	return dataEngineType;
-    }
-
-	public String getTimeoutClusterStartup() {
-		return timeoutClusterStartup;
-	}
-
-	public String getTimeoutClusterStop() {
-		return timeoutClusterStop;
-	}
-
-	public boolean isFullTest() {
-    	return fullTest;
-    }
-
-	public boolean isImageTestRequired() {
-		return imageTestRequired;
-	}
-
-	public void setImageTestRequired(boolean imageTestRequired) {
-		this.imageTestRequired = imageTestRequired;
-	}
-
-	public void setSkippedLibraries(List<Lib> skippedLibraries) {
-		this.skippedLibraries = skippedLibraries;
-	}
-
-
-	@Override
-    public String toString() {
-    	return MoreObjects.toStringHelper(this)
-    			.add("timeoutClusterCreate", timeoutClusterCreate)
-    			.add("timeoutClusterTerminate", timeoutClusterTerminate)
-				.add("timeoutClusterStartup", timeoutClusterStartup)
-				.add("timeoutClusterStop", timeoutClusterStop)
-    			.add("timeoutLibGroups", timeoutLibGroups)
-    			.add("timeoutLibInstall", timeoutLibInstall)
-				.add("timeoutImageCreate", timeoutImageCreate)
-    			.add("timeoutLibList", timeoutLibList)
-    			.add("timeoutNotebookCreate", timeoutNotebookCreate)
-    			.add("timeoutNotebookShutdown", timeoutNotebookShutdown)
-    			.add("timeoutNotebookStartup", timeoutNotebookStartup)
-    			.add("notebookTemplate", notebookTemplate)
-				.add("notebookShape", notebookShape)
-    			.add("dataEngineType", dataEngineType)
-				.add("dataEngineServiceVersion", desVersion)
-				.add("dataEngineServiceSpotRequired", desSpotRequired)
-				.add("dataEngineServiceSpotPrice", desSpotPrice)
-    			.add("fullTest", fullTest)
-				.add("imageTestRequired", imageTestRequired)
-				.add("skippedLibraries", skippedLibraries)
-    			.toString();
-    }
-
-}
diff --git a/integration-tests/src/main/resources/application.properties b/integration-tests/src/main/resources/application.properties
deleted file mode 100644
index 67a3228..0000000
--- a/integration-tests/src/main/resources/application.properties
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-conf.file.location=${CONF_ROOT_PATH}/config.properties
-keys.directory.location=${CONF_ROOT_PATH}/keys
-notebook.test.data.copy.script=${CONF_ROOT_PATH}/copy_files.py
-notebook.test.lib.location=${CONF_ROOT_PATH}/test_libs
-jupyter.test.templates.location=${CONF_ROOT_PATH}/test_templates/jupyter
-deeplearning.test.templates.location=${CONF_ROOT_PATH}/test_templates/deeplearning
-rstudio.test.templates.location=${CONF_ROOT_PATH}/test_templates/rstudio
-tensor.test.templates.location=${CONF_ROOT_PATH}/test_templates/tensor
-zeppelin.test.templates.location=${CONF_ROOT_PATH}/test_templates/zeppelin
-scenario.jupyter.files.location=${CONF_ROOT_PATH}/scenario_jupyter/
-scenario.rstudio.files.location=${CONF_ROOT_PATH}/scenario_rstudio/
-scenario.zeppelin.files.location=${CONF_ROOT_PATH}/scenario_zeppelin/
-scenario.deeplearning.files.location=${CONF_ROOT_PATH}/scenario_deeplearning/
-scenario.tensor.files.location=${CONF_ROOT_PATH}/scenario_tensor/
-ec2.config.files.location=${CONF_ROOT_PATH}/ec2_templates/
-azure.config.files.location=${CONF_ROOT_PATH}/azure_templates/
-gcp.config.files.location=${CONF_ROOT_PATH}/gcp_templates/
diff --git a/integration-tests/src/main/resources/log4j2.xml b/integration-tests/src/main/resources/log4j2.xml
deleted file mode 100644
index 8c91840..0000000
--- a/integration-tests/src/main/resources/log4j2.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~   http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing,
-  ~ software distributed under the License is distributed on an
-  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  ~ KIND, either express or implied.  See the License for the
-  ~ specific language governing permissions and limitations
-  ~ under the License.
-  -->
-
-<Configuration>
-
-	<appender name="console" class="org.apache.log4j.ConsoleAppender">
-		<layout class="org.apache.log4j.PatternLayout">
-			<param name="ConversionPattern"
-				   value="%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n" />
-		</layout>
-	</appender>
-
-	<appender name="file" class="org.apache.log4j.FileAppender">
-
-		<param name="file" value="FILE.log"/>
-		<param name="immediateFlush" value="true"/>
-		<param name="threshold" value="debug"/>
-		<param name="append" value="false"/>
-
-		<layout class="org.apache.log4j.PatternLayout">
-			<param name="conversionPattern" value="%m%n"/>
-		</layout>
-	</appender>
-
-	<Appenders>
-		<Console name="console" target="SYSTEM_OUT">
-			<PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
-		</Console>
-		<File name="file" fileName="output.log" bufferedIO="false" advertiseURI="file:log.log" advertise="true">
-		</File>
-	</Appenders>
-
-
-	<Loggers>
-		<Root level="info">
-			<AppenderRef ref="file" />
-			<AppenderRef ref="console" />
-		</Root>
-		<Logger name="com.epam.dlab.automation" level="debug" additivity="false">
-			<AppenderRef ref="file" />
-			<AppenderRef ref="console" />
-    	</Logger>
-	</Loggers>
-
-</Configuration>
\ No newline at end of file
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/TestCallable.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/TestCallable.java
deleted file mode 100644
index 881b69e..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/TestCallable.java
+++ /dev/null
@@ -1,766 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test;
-
-import com.epam.dlab.automation.cloud.VirtualMachineStatusChecker;
-import com.epam.dlab.automation.cloud.aws.AmazonHelper;
-import com.epam.dlab.automation.docker.Docker;
-import com.epam.dlab.automation.helper.*;
-import com.epam.dlab.automation.http.ApiPath;
-import com.epam.dlab.automation.http.ContentType;
-import com.epam.dlab.automation.http.HttpRequest;
-import com.epam.dlab.automation.http.HttpStatusCode;
-import com.epam.dlab.automation.model.*;
-import com.epam.dlab.automation.test.libs.LibsHelper;
-import com.epam.dlab.automation.test.libs.TestLibGroupStep;
-import com.epam.dlab.automation.test.libs.TestLibInstallStep;
-import com.epam.dlab.automation.test.libs.TestLibListStep;
-import com.epam.dlab.automation.test.libs.models.LibToSearchData;
-import com.jayway.restassured.response.Response;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.File;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.util.*;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static org.testng.Assert.fail;
-
-public class TestCallable implements Callable<Boolean> {
-    private final static Logger LOGGER = LogManager.getLogger(TestCallable.class);
-
-    private final String notebookTemplate;
-    private final boolean fullTest;
-	private final String token, ssnExpEnvURL, ssnProUserResURL, ssnCompResURL;
-    private final String storageName;
-    private final String notebookName, clusterName, dataEngineType;
-    private final NotebookConfig notebookConfig;
-	private final List<Lib> skippedLibraries;
-	private final boolean imageTestRequired;
-	private int libsFailedToInstall = 0;
-
-	TestCallable(NotebookConfig notebookConfig) {
-    	this.notebookTemplate = notebookConfig.getNotebookTemplate();
-    	this.dataEngineType = notebookConfig.getDataEngineType();
-        this.fullTest = notebookConfig.isFullTest();
-
-		this.notebookConfig = notebookConfig;
-		this.skippedLibraries = notebookConfig.getSkippedLibraries();
-		this.imageTestRequired = notebookConfig.isImageTestRequired();
-        
-        this.token = NamingHelper.getSsnToken();
-        this.ssnExpEnvURL = NamingHelper.getSelfServiceURL(ApiPath.EXP_ENVIRONMENT);
-        this.ssnProUserResURL = NamingHelper.getSelfServiceURL(ApiPath.PROVISIONED_RES);
-        this.storageName = NamingHelper.getStorageName();
-
-        final String suffixName = NamingHelper.generateRandomValue(notebookTemplate);
-        notebookName = "nb" + suffixName;
-
-		if (NamingHelper.DATA_ENGINE.equals(dataEngineType)) {
-        	this.ssnCompResURL=NamingHelper.getSelfServiceURL(ApiPath.COMPUTATIONAL_RES_SPARK);
-			clusterName = "spark" + suffixName;
-		} else if (NamingHelper.DATA_ENGINE_SERVICE.equals(dataEngineType)) {
-        	this.ssnCompResURL=NamingHelper.getSelfServiceURL(ApiPath.COMPUTATIONAL_RES);
-			clusterName = "des" + suffixName;
-        } else {
-			ssnCompResURL = "";
-			clusterName = NamingHelper.CLUSTER_ABSENT;
-			LOGGER.info("illegal argument dataEngineType {} , should be dataengine or dataengine-service",
-					dataEngineType);
-        }
-
-        LOGGER.info("   SSN exploratory environment URL is {}", ssnExpEnvURL);
-        LOGGER.info("   SSN provisioned user resources URL is {}", ssnProUserResURL);
-    }
-
-    private static Duration getDuration(String duration) {
-    	return Duration.parse("PT" + duration);
-    }
-
-	@Override
-    public Boolean call() throws Exception {
-		try {
-			final String notebookIp = createNotebook(notebookName, "");
-			testLibs();
-
-			if (imageTestRequired) {
-				executeImageTest();
-			}
-
-			final DeployClusterDto deployClusterDto = createClusterDto();
-			final String actualClusterName = deployClusterDto != null ? NamingHelper.getClusterName(
-					NamingHelper.getClusterInstanceNameForTestDES(notebookName, clusterName, dataEngineType),
-					dataEngineType, true) : NamingHelper.CLUSTER_ABSENT;
-
-			LOGGER.info("Actual cluster name of {} is {}", dataEngineType, actualClusterName);
-
-			if (NamingHelper.DATA_ENGINE.equals(dataEngineType)) {
-				LOGGER.debug("Spark cluster {} is stopping...", clusterName);
-				stopCluster();
-				LOGGER.debug("Starting Spark cluster {}...", clusterName);
-				startCluster();
-			}
-
-			if (!ConfigPropertyValue.isRunModeLocal()) {
-
-				TestDataEngineService test = new TestDataEngineService();
-				test.run(notebookName, notebookTemplate, actualClusterName);
-
-				String notebookScenarioFilesLocation = PropertiesResolver.getPropertyByName(
-						String.format(PropertiesResolver.NOTEBOOK_SCENARIO_FILES_LOCATION_PROPERTY_TEMPLATE,
-								notebookTemplate));
-				String notebookTemplatesLocation = PropertiesResolver.getPropertyByName(
-						String.format(PropertiesResolver.NOTEBOOK_TEST_TEMPLATES_LOCATION, notebookTemplate));
-				test.run2(NamingHelper.getSsnIp(), notebookIp, actualClusterName,
-						new File(notebookScenarioFilesLocation),
-						new File(notebookTemplatesLocation), notebookName);
-			}
-
-			if (NamingHelper.DATA_ENGINE_SERVICE.equals(dataEngineType) && fullTest && deployClusterDto != null) {
-				stopEnvironment();
-				restartNotebookAndRedeployToTerminate(deployClusterDto);
-			}
-			if (deployClusterDto != null) {
-				terminateNotebook(deployClusterDto);
-			} else {
-				terminateNotebook(notebookName);
-			}
-
-			LOGGER.info("{} All tests finished successfully", notebookName);
-			return true;
-		} catch (AssertionError | Exception e) {
-			LOGGER.error("Error occurred while testing notebook {} with configuration {}", notebookName,
-					notebookConfig, e);
-			throw e;
-		}
-	}
-
-	private void executeImageTest() throws Exception {
-		LOGGER.debug("Tests with machine image are starting...");
-		try {
-			String imageName = "TestIm" +
-					String.valueOf(new Random().ints(0, 1000).findFirst().orElse(0));
-			LOGGER.info("Machine image with name {} from notebook {} is creating...", imageName, notebookName);
-			createMachineImageFromNotebook(notebookName, imageName);
-			LOGGER.info("Machine image with name {} was successfully created.", imageName);
-
-			String copyNotebookName = "cp" + notebookName;
-			LOGGER.info("Notebook {} from machine image {} is creating...", copyNotebookName, imageName);
-			createNotebook(copyNotebookName, imageName);
-			LOGGER.info("Notebook {} from machine image {} was successfully created.", copyNotebookName, imageName);
-
-			LOGGER.info("Comparing notebooks: {} with {}...", notebookName, copyNotebookName);
-			if (areNotebooksEqual(notebookName, copyNotebookName)) {
-				LOGGER.info("Notebooks with names {} and {} are equal", notebookName, copyNotebookName);
-			} else {
-				Assert.fail("Notebooks aren't equal. Created from machine image notebook is different from base " +
-						"exploratory");
-			}
-
-			LOGGER.debug("Notebook {} created from image {} is terminating...", copyNotebookName, imageName);
-			terminateNotebook(copyNotebookName);
-
-			LOGGER.info("Tests with machine image creation finished successfully");
-		} catch (AssertionError | Exception e) {
-			LOGGER.error("Error occurred while testing notebook {} and machine image {}", notebookName, e);
-			throw e;
-		}
-	}
-
-	private DeployClusterDto createClusterDto() throws Exception {
-		if (ConfigPropertyValue.getCloudProvider().equalsIgnoreCase(CloudProvider.AZURE_PROVIDER)
-				&& NamingHelper.DATA_ENGINE_SERVICE.equals(dataEngineType)) {
-			LOGGER.info("There are no available dataengine services for Azure. Cluster creation is skipped.");
-			return null;
-		}
-		if (!NamingHelper.DATA_ENGINE_SERVICE.equals(dataEngineType) && !NamingHelper.DATA_ENGINE.equals(dataEngineType)) {
-			LOGGER.info("Parameter 'dataEngineType' is unspecified or isn't valid. Cluster creation is skipped.");
-			return null;
-		}
-		String gettingStatus;
-		LOGGER.info("7. {} cluster {} will be deployed for {} ...", dataEngineType, clusterName, notebookName);
-		LOGGER.info("  {} : SSN computational resources URL is {}", notebookName, ssnCompResURL);
-
-		DeployClusterDto clusterDto = null;
-		if (NamingHelper.DATA_ENGINE.equals(dataEngineType)) {
-			clusterDto = JsonMapperDto.readNode(
-					Paths.get(String.format("%s/%s", CloudHelper.getClusterConfFileLocation(), notebookTemplate), "spark_cluster.json").toString(),
-					DeploySparkDto.class);
-		} else if (NamingHelper.DATA_ENGINE_SERVICE.equals(dataEngineType)) {
-			clusterDto = JsonMapperDto.readNode(
-					Paths.get(String.format("%s/%s", CloudHelper.getClusterConfFileLocation(), notebookTemplate),
-							CloudHelper.getDockerTemplateFileForDES(notebookConfig.isDesSpotRequired())).toString(),
-					CloudHelper.getDeployClusterClass());
-		} else {
-			LOGGER.error("illegal argument dataEngineType {}, should be dataengine or dataengine-service", dataEngineType);
-			fail("illegal argument dataEngineType " + dataEngineType + ", should be dataengine or dataengine-service");
-		}
-
-		clusterDto.setName(clusterName);
-		clusterDto.setNotebookName(notebookName);
-		clusterDto = CloudHelper.populateDeployClusterDto(clusterDto, notebookConfig);
-		LOGGER.info("{}: {} cluster = {}", notebookName, dataEngineType, clusterDto);
-		Response responseDeployingCluster = new HttpRequest().webApiPut(ssnCompResURL, ContentType.JSON,
-				clusterDto, token);
-		LOGGER.info("{}:   responseDeployingCluster.getBody() is {}", notebookName,
-				responseDeployingCluster.getBody().asString());
-		Assert.assertEquals(responseDeployingCluster.statusCode(), HttpStatusCode.OK, dataEngineType +
-				" cluster " + clusterName + " was not deployed");
-
-		gettingStatus = WaitForStatus.cluster(ssnProUserResURL, token, notebookName, clusterName, "creating",
-				getDuration(notebookConfig.getTimeoutClusterCreate()));
-		if (!ConfigPropertyValue.isRunModeLocal()) {
-			if (!(gettingStatus.contains("configuring") || gettingStatus.contains("running")))
-				throw new Exception(notebookName + ": " + dataEngineType + " cluster " + clusterName +
-						" has not been deployed. Cluster status is " + gettingStatus);
-			LOGGER.info("{}: {} cluster {} has been deployed", notebookName, dataEngineType, clusterName);
-
-			VirtualMachineStatusChecker.checkIfRunning(
-					NamingHelper.getClusterInstanceName(notebookName, clusterName, dataEngineType), false);
-
-			Docker.checkDockerStatus(
-					NamingHelper.getClusterContainerName(notebookName, clusterName, "create"), NamingHelper.getSsnIp());
-		}
-		LOGGER.info("{}:   Waiting until {} cluster {} has been configured ...", notebookName, dataEngineType, clusterName);
-
-		gettingStatus = WaitForStatus.cluster(ssnProUserResURL, token, notebookName, clusterName, "configuring",
-				getDuration(notebookConfig.getTimeoutClusterCreate()));
-		if (!gettingStatus.contains("running"))
-			throw new Exception(notebookName + ": " + dataEngineType + " cluster " + clusterName +
-					" has not been configured. Spark cluster status is " + gettingStatus);
-		LOGGER.info(" {}: {} cluster {} has been configured", notebookName, dataEngineType, clusterName);
-
-		if (!ConfigPropertyValue.isRunModeLocal()) {
-			VirtualMachineStatusChecker.checkIfRunning(
-					NamingHelper.getClusterInstanceName(notebookName, clusterName, dataEngineType), false);
-			Docker.checkDockerStatus(
-					NamingHelper.getClusterContainerName(notebookName, clusterName, "create"), NamingHelper.getSsnIp());
-		}
-		if (ConfigPropertyValue.getCloudProvider().equalsIgnoreCase(CloudProvider.AWS_PROVIDER)) {
-			LOGGER.info("{}:   Check bucket {}", notebookName, storageName);
-			AmazonHelper.printBucketGrants(storageName);
-		}
-
-		return clusterDto;
-	}
-
-	private String createNotebook(String notebookName, String imageName) throws Exception {
-		LOGGER.info("6. Notebook {} will be created ...", notebookName);
-		String notebookConfigurationFile =
-				String.format(PropertiesResolver.NOTEBOOK_CONFIGURATION_FILE_TEMPLATE, notebookTemplate, notebookTemplate);
-		LOGGER.info("{} notebook configuration file: {}", notebookName, notebookConfigurationFile);
-
-		CreateNotebookDto createNoteBookRequest =
-				JsonMapperDto.readNode(
-						Paths.get(Objects.requireNonNull(CloudHelper.getClusterConfFileLocation()),
-								notebookConfigurationFile).toString(), CreateNotebookDto.class);
-
-		createNoteBookRequest.setName(notebookName);
-		if (!StringUtils.isEmpty(notebookConfig.getNotebookShape())) {
-			createNoteBookRequest.setShape(notebookConfig.getNotebookShape());
-		}
-
-		if (StringUtils.isNotBlank(imageName)) {
-			final String ssnImageDataUrl =
-					String.format(NamingHelper.getSelfServiceURL(ApiPath.IMAGE_CREATION + "/%s"), imageName);
-			LOGGER.info("Image data fetching URL: {}", ssnImageDataUrl);
-
-			Response response = new HttpRequest().webApiGet(ssnImageDataUrl, token);
-			Assert.assertEquals(response.statusCode(), HttpStatusCode.OK, "Cannot get data of machine image with name "
-					+ imageName);
-			ImageDto dto = response.as(ImageDto.class);
-			LOGGER.info("Image dto is: {}", dto);
-			createNoteBookRequest.setImageName(dto.getFullName());
-		}
-
-		LOGGER.info("Inside createNotebook(): createNotebookRequest: image is {}, templateName is {}, shape is {}, " +
-						"version is {}", createNoteBookRequest.getImage(), createNoteBookRequest.getTemplateName(),
-				createNoteBookRequest.getShape(), createNoteBookRequest.getVersion());
-
-		Response responseCreateNotebook = new HttpRequest().webApiPut(ssnExpEnvURL, ContentType.JSON,
-				createNoteBookRequest, token);
-
-		LOGGER.info(" {}:  responseCreateNotebook.getBody() is {}", notebookName,
-				responseCreateNotebook.getBody().asString());
-
-		LOGGER.info("Inside createNotebook(): responseCreateNotebook.statusCode() is {}",
-				responseCreateNotebook.statusCode());
-
-		Assert.assertEquals(responseCreateNotebook.statusCode(), HttpStatusCode.OK,
-				"Notebook " + notebookName + " was not created");
-
-		String gettingStatus = WaitForStatus.notebook(ssnProUserResURL, token, notebookName, "creating",
-				getDuration(notebookConfig.getTimeoutNotebookCreate()));
-		if (!gettingStatus.contains("running")) {
-			LOGGER.error("Notebook {} is in state {}", notebookName, gettingStatus);
-			throw new Exception("Notebook " + notebookName + " has not been created. Notebook status is " + gettingStatus);
-		}
-		LOGGER.info("   Notebook {} has been created", notebookName);
-
-		VirtualMachineStatusChecker.checkIfRunning(NamingHelper.getNotebookInstanceName(notebookName), false);
-
-		Docker.checkDockerStatus(NamingHelper.getNotebookContainerName(notebookName, "create"),
-				NamingHelper.getSsnIp());
-
-		LOGGER.info("   Notebook {} status has been verified", notebookName);
-		//get notebook IP
-		String notebookIp =
-				CloudHelper.getInstancePrivateIP(NamingHelper.getNotebookInstanceName(notebookName), false);
-
-		LOGGER.info("   Notebook {} IP is {}", notebookName, notebookIp);
-
-		return notebookIp;
-	}
-
-	private void createMachineImageFromNotebook(String notebookName, String imageName) throws InterruptedException {
-		final String ssnImageCreationURL = NamingHelper.getSelfServiceURL(ApiPath.IMAGE_CREATION);
-		ExploratoryImageDto requestBody =
-				new ExploratoryImageDto(notebookName, imageName, "Machine image for testing");
-
-		final String ssnImageDataUrl = ssnImageCreationURL + "/" + imageName;
-		LOGGER.info("Machine image data fetching URL: {}", ssnImageDataUrl);
-
-		long currentTime = System.currentTimeMillis() / 1000L;
-		long expiredTime = currentTime + getDuration(notebookConfig.getTimeoutImageCreate()).getSeconds();
-
-		Response imageCreationResponse =
-				new HttpRequest().webApiPost(ssnImageCreationURL, ContentType.JSON, requestBody, token);
-		if (imageCreationResponse.getStatusCode() != HttpStatusCode.ACCEPTED) {
-			LOGGER.error("Machine image creation response status {}, body {}", imageCreationResponse.getStatusCode(),
-					imageCreationResponse.getBody().print());
-			Assert.fail("Cannot create machine image for " + requestBody);
-		}
-
-		while (expiredTime > currentTime) {
-
-			imageCreationResponse = new HttpRequest().webApiGet(ssnImageDataUrl, token);
-			if (imageCreationResponse.getStatusCode() == HttpStatusCode.OK) {
-
-				LOGGER.info("Image creation response body for notebook {} is {}", notebookName,
-						imageCreationResponse.getBody().asString());
-
-				String actualImageStatus = imageCreationResponse.as(ImageDto.class).getStatus();
-
-				LOGGER.info("Current machine image status is: {}", actualImageStatus);
-
-				if (!"created".equalsIgnoreCase(actualImageStatus)) {
-					LOGGER.info("Wait {} sec left for machine image status {}", expiredTime - currentTime,
-							requestBody);
-					TimeUnit.SECONDS.sleep(ConfigPropertyValue.isRunModeLocal() ? 3L : 20L);
-				} else {
-					break;
-				}
-
-			} else {
-				LOGGER.error("Response status{}, body {}", imageCreationResponse.getStatusCode(),
-						imageCreationResponse.getBody().print());
-				Assert.fail("Machine image creation failed for " + notebookName);
-			}
-			currentTime = System.currentTimeMillis() / 1000L;
-		}
-
-		if (expiredTime <= currentTime) {
-			Assert.fail("Due to timeout cannot create machine image on " + notebookName + " " + requestBody);
-		}
-	}
-
-	private boolean areNotebooksEqual(String firstNotebookName, String secondNotebookName) {
-		if (firstNotebookName == null || secondNotebookName == null) {
-			Assert.fail("Wrong exploratory names passed");
-			return false;
-		}
-		Response fetchExploratoriesResponse = new HttpRequest().webApiGet(ssnProUserResURL, token);
-		if (fetchExploratoriesResponse.statusCode() != HttpStatusCode.OK) {
-			LOGGER.error("Response status: {}, body: {}", fetchExploratoriesResponse.getStatusCode(),
-					fetchExploratoriesResponse.getBody().print());
-			Assert.fail("Fetching resource list is failed");
-			return false;
-		}
-		List<Map<String, String>> notebooksTotal = fetchExploratoriesResponse.jsonPath().getList("exploratory");
-		List<Map<String, String>> notebooksFiltered = notebooksTotal.stream()
-				.filter(map -> map.get("exploratory_name").equals(firstNotebookName) ||
-						map.get("exploratory_name").equals(secondNotebookName))
-				.collect(Collectors.toList());
-
-		if (notebooksFiltered.isEmpty()) {
-			Assert.fail("Notebooks with names " + firstNotebookName + ", " + secondNotebookName + " don't exist");
-			return false;
-		}
-		if (notebooksFiltered.size() == 1) {
-			Assert.fail("Only one notebook with name " + notebooksFiltered.get(0).get("exploratory_name") +
-					" found. There is nothing for comparison");
-			return false;
-		}
-		if (notebooksFiltered.size() > 2) {
-			Assert.fail("Error occurred: found " + notebooksFiltered.size() + " notebooks, but only 2 expected");
-			return false;
-		}
-
-		return areNotebooksEqualByFields(notebooksFiltered.get(0), notebooksFiltered.get(1)) &&
-				areLibListsEqual(getNotebookLibList(firstNotebookName), getNotebookLibList(secondNotebookName));
-
-	}
-
-	private boolean areNotebooksEqualByFields(Map<String, String> firstNotebook, Map<String, String> secondNotebook) {
-		if (!firstNotebook.get("shape").equals(secondNotebook.get("shape"))) {
-			Assert.fail("Notebooks aren't equal: they have different shapes");
-			return false;
-		}
-		if (!firstNotebook.get("image").equals(secondNotebook.get("image"))) {
-			Assert.fail("Notebooks aren't equal: they are created from different Docker images");
-			return false;
-		}
-		if (!firstNotebook.get("template_name").equals(secondNotebook.get("template_name"))) {
-			Assert.fail("Notebooks aren't equal: they are created from different templates");
-			return false;
-		}
-		if (!firstNotebook.get("version").equals(secondNotebook.get("version"))) {
-			Assert.fail("Notebooks aren't equal: they have different versions");
-			return false;
-		}
-		return true;
-	}
-
-	private List<Lib> getNotebookLibList(String notebookName) {
-		Map<String, String> params = new HashMap<>();
-		params.put("exploratory_name", notebookName);
-		Response libListResponse = new HttpRequest()
-				.webApiGet(NamingHelper.getSelfServiceURL(ApiPath.LIB_LIST_EXPLORATORY_FORMATTED), token, params);
-		List<Lib> libs = null;
-		if (libListResponse.getStatusCode() == HttpStatusCode.OK) {
-			libs = Arrays.asList(libListResponse.getBody().as(Lib[].class));
-		} else {
-			LOGGER.error("Response status {}, body {}", libListResponse.getStatusCode(), libListResponse.getBody()
-					.print());
-			Assert.fail("Cannot get lib list for " + libListResponse);
-			return libs;
-		}
-		return libs.stream().filter(Objects::nonNull).collect(Collectors.toList());
-	}
-
-	private boolean areLibListsEqual(List<Lib> firstLibList, List<Lib> secondLibList) {
-		if (firstLibList == null && secondLibList == null) {
-			return true;
-		}
-		if (firstLibList == null || secondLibList == null || firstLibList.size() != secondLibList.size()) {
-			return false;
-		}
-		for (Lib lib : firstLibList) {
-			String libGroup = lib.getGroup();
-			String libName = lib.getName();
-			String libVersion = lib.getVersion();
-			List<Lib> filtered = secondLibList.stream().filter(l ->
-					l.getGroup().equals(libGroup) && l.getName().equals(libName) && l.getVersion().equals(libVersion))
-					.collect(Collectors.toList());
-			if (filtered.isEmpty()) {
-				return false;
-			}
-		}
-		return true;
-	}
-
-	private void testLibs() throws Exception {
-		LOGGER.info("{}: install libraries  ...", notebookName);
-
-		TestLibGroupStep testLibGroupStep = new TestLibGroupStep(ApiPath.LIB_GROUPS, token, notebookName,
-				getDuration(notebookConfig.getTimeoutLibGroups()).getSeconds(),
-				getTemplateTestLibFile(LibsHelper.getLibGroupsPath(notebookName)));
-
-		testLibGroupStep.init();
-		testLibGroupStep.verify();
-
-		List<LibToSearchData> libToSearchDataList = JsonMapperDto.readListOf(
-				getTemplateTestLibFile(LibsHelper.getLibListPath(notebookName)), LibToSearchData.class);
-
-		LOGGER.debug("Skipped libraries for notebook {}: {}", notebookName, skippedLibraries);
-		int maxLibsFailedToInstall = libToSearchDataList.size();
-
-		for (LibToSearchData libToSearchData : libToSearchDataList) {
-			TestLibListStep testLibListStep = new TestLibListStep(ApiPath.LIB_LIST, token, notebookName,
-					getDuration(notebookConfig.getTimeoutLibList()).getSeconds(), libToSearchData);
-
-			testLibListStep.init();
-			testLibListStep.verify();
-
-			Lib lib;
-			do {
-				lib = testLibListStep.getLibs().get(new Random().nextInt(testLibListStep.getLibs().size()));
-			} while (skippedLibraries.contains(lib));
-
-			TestLibInstallStep testLibInstallStep =
-					new TestLibInstallStep(ApiPath.LIB_INSTALL, ApiPath.LIB_LIST_EXPLORATORY_FORMATTED,
-							token, notebookName, getDuration(notebookConfig.getTimeoutLibInstall()).getSeconds(), lib);
-
-			testLibInstallStep.init();
-			testLibInstallStep.verify();
-			if (!testLibInstallStep.isLibraryInstalled()) {
-				libsFailedToInstall++;
-			}
-			if (libsFailedToInstall == maxLibsFailedToInstall) {
-				Assert.fail("Test for library installing is failed: there are not any installed library");
-			}
-
-			LOGGER.info("{}: current quantity of failed libs to install: {}", notebookName, libsFailedToInstall);
-		}
-		LOGGER.info("{}: installed {} testing libraries from {}", notebookName,
-				(maxLibsFailedToInstall - libsFailedToInstall), maxLibsFailedToInstall);
-	}
-
-	private String getTemplateTestLibFile(String fileName) {
-        String absoluteFileName = Paths.get(PropertiesResolver.getNotebookTestLibLocation(), fileName).toString();
-        LOGGER.info("Absolute file name is {}", absoluteFileName);
-        return absoluteFileName;
-   }
-
-   private void restartNotebookAndRedeployToTerminate(DeployClusterDto deployClusterDto) throws Exception {
-	   restartNotebook();
-	   final String clusterNewName = redeployCluster(deployClusterDto);
-	   terminateCluster(clusterNewName);
-   }
-
-
-	private void restartNotebook() throws Exception {
-       LOGGER.info("9. Notebook {} will be re-started ...", notebookName);
-       String requestBody = "{\"notebook_instance_name\":\"" + notebookName + "\"}";
-       Response respStartNotebook = new HttpRequest().webApiPost(ssnExpEnvURL, ContentType.JSON, requestBody, token);
-       LOGGER.info("    respStartNotebook.getBody() is {}", respStartNotebook.getBody().asString());
-       Assert.assertEquals(respStartNotebook.statusCode(), HttpStatusCode.OK);
-
-		String gettingStatus = WaitForStatus.notebook(ssnProUserResURL, token, notebookName,
-			VirtualMachineStatusChecker.getStartingStatus(), getDuration(notebookConfig.getTimeoutNotebookStartup()));
-       String status = VirtualMachineStatusChecker.getRunningStatus();
-       if (!Objects.requireNonNull(status).contains(gettingStatus)){
-           throw new Exception("Notebook " + notebookName + " has not been started. Notebook status is " + gettingStatus);
-       }
-       LOGGER.info("    Notebook {} has been started", notebookName);
-
-       VirtualMachineStatusChecker.checkIfRunning(NamingHelper.getNotebookInstanceName(notebookName), false);
-
-       Docker.checkDockerStatus(NamingHelper.getNotebookContainerName(notebookName, "start"), NamingHelper.getSsnIp());
-   }
-
-   private void terminateNotebook(String notebookName) throws Exception {
-       String gettingStatus;
-       LOGGER.info("12. Notebook {} will be terminated ...", notebookName);
-       final String ssnTerminateNotebookURL = NamingHelper.getSelfServiceURL(ApiPath.getTerminateNotebookUrl(notebookName));
-       Response respTerminateNotebook = new HttpRequest().webApiDelete(ssnTerminateNotebookURL, ContentType.JSON, token);
-       LOGGER.info("    respTerminateNotebook.getBody() is {}", respTerminateNotebook.getBody().asString());
-       Assert.assertEquals(respTerminateNotebook.statusCode(), HttpStatusCode.OK);
-
-	   gettingStatus = WaitForStatus.notebook(ssnProUserResURL, token, notebookName, "terminating",
-			   getDuration(notebookConfig.getTimeoutClusterTerminate()));
-       if (!gettingStatus.contains("terminated"))
-           throw new Exception("Notebook" + notebookName + " has not been terminated. Notebook status is " +
-				   gettingStatus);
-
-       VirtualMachineStatusChecker.checkIfTerminated(NamingHelper.getNotebookInstanceName(notebookName), false);
-       Docker.checkDockerStatus(NamingHelper.getNotebookContainerName(notebookName, "terminate"), NamingHelper.getSsnIp());
-   }
-
-   private void terminateNotebook(DeployClusterDto deployCluster) throws Exception {
-	   terminateNotebook(deployCluster.getNotebookName());
-
-       String gettingStatus = WaitForStatus.getClusterStatus(
-				new HttpRequest()
-					.webApiGet(ssnProUserResURL, token)
-					.getBody()
-					.jsonPath(),
-			   deployCluster.getNotebookName(), deployCluster.getName());
-       if (!gettingStatus.contains("terminated"))
-		   throw new Exception(dataEngineType + " cluster " + deployCluster.getName() + " has not been terminated for Notebook "
-				   + deployCluster.getNotebookName() + ". Cluster status is " + gettingStatus);
-	   LOGGER.info("    {} cluster {} has been terminated for Notebook {}", dataEngineType, deployCluster.getName(),
-			   deployCluster.getNotebookName());
-
-	   VirtualMachineStatusChecker.checkIfTerminated(
-			   NamingHelper.getClusterInstanceName(
-					   deployCluster.getNotebookName(), deployCluster.getName(), dataEngineType), true);
-
-   }
-
-	private void startCluster() throws Exception {
-		String gettingStatus;
-		LOGGER.info("    Cluster {} will be started for notebook {} ...", clusterName, notebookName);
-		final String ssnStartClusterURL =
-				NamingHelper.getSelfServiceURL(ApiPath.getStartClusterUrl(notebookName, clusterName));
-		LOGGER.info("    SSN start cluster URL is {}", ssnStartClusterURL);
-
-		Response respStartCluster = new HttpRequest().webApiPut(ssnStartClusterURL, ContentType.JSON, token);
-		LOGGER.info("    respStartCluster.getBody() is {}", respStartCluster.getBody().asString());
-		Assert.assertEquals(respStartCluster.statusCode(), HttpStatusCode.OK);
-
-		gettingStatus = WaitForStatus.cluster(ssnProUserResURL, token, notebookName, clusterName, "starting",
-				getDuration(notebookConfig.getTimeoutClusterStartup()));
-		if (!gettingStatus.contains("running"))
-			throw new Exception(dataEngineType + " cluster " + clusterName +
-					" has not been started. Cluster status is " + gettingStatus);
-		LOGGER.info("    {} cluster {} has been started for notebook {}", dataEngineType, clusterName,
-				notebookName);
-
-		VirtualMachineStatusChecker.checkIfRunning(
-				NamingHelper.getClusterInstanceName(notebookName, clusterName, dataEngineType), true);
-
-		Docker.checkDockerStatus(
-				NamingHelper.getClusterContainerName(notebookName, clusterName, "start"), NamingHelper.getSsnIp());
-	}
-
-	private void stopCluster() throws Exception {
-		String gettingStatus;
-		LOGGER.info("    Cluster {} will be stopped for notebook {} ...", clusterName, notebookName);
-		final String ssnStopClusterURL =
-				NamingHelper.getSelfServiceURL(ApiPath.getStopClusterUrl(notebookName, clusterName));
-		LOGGER.info("    SSN stop cluster URL is {}", ssnStopClusterURL);
-
-		Response respStopCluster = new HttpRequest().webApiDelete(ssnStopClusterURL, ContentType.JSON, token);
-		LOGGER.info("    respStopCluster.getBody() is {}", respStopCluster.getBody().asString());
-		Assert.assertEquals(respStopCluster.statusCode(), HttpStatusCode.OK);
-
-		gettingStatus = WaitForStatus.cluster(ssnProUserResURL, token, notebookName, clusterName, "stopping",
-				getDuration(notebookConfig.getTimeoutClusterStop()));
-		if (!gettingStatus.contains("stopped"))
-			throw new Exception(dataEngineType + " cluster " + clusterName +
-					" has not been stopped. Cluster status is " + gettingStatus);
-		LOGGER.info("    {} cluster {} has been stopped for notebook {}", dataEngineType, clusterName,
-				notebookName);
-
-		VirtualMachineStatusChecker.checkIfStopped(
-				NamingHelper.getClusterInstanceName(notebookName, clusterName, dataEngineType), true);
-
-		Docker.checkDockerStatus(
-				NamingHelper.getClusterContainerName(notebookName, clusterName, "stop"), NamingHelper.getSsnIp());
-	}
-   
-   private void terminateCluster(String clusterNewName) throws Exception {
-       String gettingStatus;
-       LOGGER.info("    New cluster {} will be terminated for notebook {} ...", clusterNewName, notebookName);
-	   final String ssnTerminateClusterURL =
-			   NamingHelper.getSelfServiceURL(ApiPath.getTerminateClusterUrl(notebookName, clusterNewName));
-       LOGGER.info("    SSN terminate cluster URL is {}", ssnTerminateClusterURL);
-
-       Response respTerminateCluster = new HttpRequest().webApiDelete(ssnTerminateClusterURL, ContentType.JSON, token);
-       LOGGER.info("    respTerminateCluster.getBody() is {}", respTerminateCluster.getBody().asString());
-       Assert.assertEquals(respTerminateCluster.statusCode(), HttpStatusCode.OK);
-
-	   gettingStatus = WaitForStatus.cluster(ssnProUserResURL, token, notebookName, clusterNewName, "terminating",
-			   getDuration(notebookConfig.getTimeoutClusterTerminate()));
-       if (!gettingStatus.contains("terminated"))
-		   throw new Exception("New " + dataEngineType + " cluster " + clusterNewName +
-				   " has not been terminated. Cluster status is " + gettingStatus);
-       LOGGER.info("    New {} cluster {} has been terminated for notebook {}",dataEngineType, clusterNewName,
-			   notebookName);
-
-	   VirtualMachineStatusChecker.checkIfTerminated(
-			   NamingHelper.getClusterInstanceName(notebookName, clusterNewName, dataEngineType), true);
-
-	   Docker.checkDockerStatus(
-			   NamingHelper.getClusterContainerName(notebookName, clusterNewName, "terminate"),
-			   NamingHelper.getSsnIp());
-   }
-
-   private String redeployCluster(DeployClusterDto deployCluster) throws Exception {
-       final String clusterNewName = "New" + clusterName;
-       String gettingStatus;
-
-	   LOGGER.info("10. New {} cluster {} will be deployed for termination for notebook {} ...", dataEngineType,
-			   clusterNewName, notebookName);
-
-       deployCluster.setName(clusterNewName);
-	   deployCluster.setNotebookName(notebookName);
-       Response responseDeployingClusterNew = new HttpRequest().webApiPut(ssnCompResURL, ContentType.JSON, deployCluster, token);
-       LOGGER.info("    responseDeployingClusterNew.getBody() is {}", responseDeployingClusterNew.getBody().asString());
-       Assert.assertEquals(responseDeployingClusterNew.statusCode(), HttpStatusCode.OK);
-
-	   gettingStatus = WaitForStatus.cluster(ssnProUserResURL, token, notebookName, clusterNewName, "creating",
-			   getDuration(notebookConfig.getTimeoutClusterCreate()));
-       if (!(gettingStatus.contains("configuring") || gettingStatus.contains("running")))
-           throw new Exception("New cluster " + clusterNewName + " has not been deployed. Cluster status is " + gettingStatus);
-       LOGGER.info("    New cluster {} has been deployed", clusterNewName);
-
-       LOGGER.info("   Waiting until cluster {} has been configured ...", clusterNewName);
-	   gettingStatus = WaitForStatus.cluster(ssnProUserResURL, token, notebookName, clusterNewName, "configuring",
-			   getDuration(notebookConfig.getTimeoutClusterCreate()));
-       if (!gettingStatus.contains("running"))
-           throw new Exception("Cluster " + clusterNewName + " has not been configured. Cluster status is " +
-				   gettingStatus);
-       LOGGER.info("   Cluster {} has been configured", clusterNewName);
-
-	   VirtualMachineStatusChecker.checkIfRunning(
-			   NamingHelper.getClusterInstanceName(notebookName, clusterNewName, dataEngineType), true);
-
-	   Docker.checkDockerStatus(NamingHelper.getClusterContainerName(notebookName, clusterNewName, "create"),
-			   NamingHelper.getSsnIp());
-       return clusterNewName;
-   }
-
-   private void stopEnvironment() throws Exception {
-       String gettingStatus;
-       LOGGER.info("8. Notebook {} will be stopped ...", notebookName);
-       final String ssnStopNotebookURL = NamingHelper.getSelfServiceURL(ApiPath.getStopNotebookUrl(notebookName));
-       LOGGER.info("   SSN stop notebook URL is {}", ssnStopNotebookURL);
-
-       Response responseStopNotebook = new HttpRequest().webApiDelete(ssnStopNotebookURL, ContentType.JSON, token);
-       LOGGER.info("   responseStopNotebook.getBody() is {}", responseStopNotebook.getBody().asString());
-	   Assert.assertEquals(responseStopNotebook.statusCode(), HttpStatusCode.OK, "Notebook " + notebookName +
-			   " was not stopped");
-
-	   gettingStatus = WaitForStatus.notebook(ssnProUserResURL, token, notebookName, "stopping",
-			   getDuration(notebookConfig.getTimeoutNotebookShutdown()));
-       if (!gettingStatus.contains("stopped"))
-           throw new Exception("Notebook " + notebookName + " has not been stopped. Notebook status is " +
-				   gettingStatus);
-       LOGGER.info("   Notebook {} has been stopped", notebookName);
-	   if (!clusterName.equalsIgnoreCase(NamingHelper.CLUSTER_ABSENT)) {
-		   gettingStatus = WaitForStatus.getClusterStatus(
-				   new HttpRequest()
-						   .webApiGet(ssnProUserResURL, token)
-						   .getBody()
-						   .jsonPath(),
-				   notebookName, clusterName);
-
-		   if (NamingHelper.DATA_ENGINE.equals(dataEngineType) && !gettingStatus.contains("stopped")) {
-			   throw new Exception("Computational resources have not been stopped for Notebook " + notebookName +
-					   ". Data engine status is " + gettingStatus);
-		   } else if (NamingHelper.DATA_ENGINE_SERVICE.equals(dataEngineType) &&
-				   !ConfigPropertyValue.getCloudProvider().equalsIgnoreCase(CloudProvider.AZURE_PROVIDER)
-				   && !gettingStatus.contains("terminated")) {
-			   throw new Exception("Computational resources have not been terminated for Notebook " + notebookName +
-					   ". Data engine service status is " + gettingStatus);
-		   }
-
-		   LOGGER.info("   Computational resources have been terminated for notebook {}", notebookName);
-
-		   if (NamingHelper.DATA_ENGINE.equals(dataEngineType)){
-			   VirtualMachineStatusChecker.checkIfStopped(NamingHelper.getClusterInstanceName(notebookName,
-					   clusterName, dataEngineType), true);
-		   } else if (NamingHelper.DATA_ENGINE_SERVICE.equals(dataEngineType)){
-			   VirtualMachineStatusChecker.checkIfTerminated(NamingHelper.getClusterInstanceName(notebookName,
-					   clusterName, dataEngineType), true);
-		   }
-
-	   }
-       Docker.checkDockerStatus(NamingHelper.getNotebookContainerName(notebookName, "stop"), NamingHelper.getSsnIp());
-   }
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/TestDataEngineService.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/TestDataEngineService.java
deleted file mode 100644
index ad73842..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/TestDataEngineService.java
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test;
-
-import com.epam.dlab.automation.docker.AckStatus;
-import com.epam.dlab.automation.docker.SSHConnect;
-import com.epam.dlab.automation.helper.CloudHelper;
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.epam.dlab.automation.helper.NamingHelper;
-import com.epam.dlab.automation.helper.PropertiesResolver;
-import com.jcraft.jsch.*;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Vector;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static org.testng.Assert.*;
-
-class TestDataEngineService {
-    private final static Logger LOGGER = LogManager.getLogger(TestDataEngineService.class);
-    
-    private final static String COMMAND_COPY_TO_NOTEBOOK;
-    private final static String COMMAND_RUN_PYTHON;
-    private final static String COMMAND_RUN_PYTHON2;
-
-    static {
-        COMMAND_COPY_TO_NOTEBOOK = "scp -r -i %s -o 'StrictHostKeyChecking no' ~/%s %s@%s:/tmp/%s";
-        COMMAND_RUN_PYTHON = CloudHelper.getPythonTestingScript();
-        COMMAND_RUN_PYTHON2 = CloudHelper.getPythonTestingScript2();
-    }
-
-
-	void run(String notebookName, String notebookTemplate, String clusterName) throws Exception {
-        Session ssnSession = null;
-        try {
-            LOGGER.info("{}: Copying test data copy scripts {} to SSN {}...",
-            		notebookName, NamingHelper.getStorageName(), NamingHelper.getSsnIp());
-            ssnSession = SSHConnect.getSession(ConfigPropertyValue.getClusterOsUser(), NamingHelper.getSsnIp(), 22);
-            copyFileToSSN(ssnSession, PropertiesResolver.getNotebookTestDataCopyScriptLocation(), "");
-			executePythonScript2(ssnSession, clusterName,
-					new File(PropertiesResolver.getNotebookTestDataCopyScriptLocation()).getName(),
-					notebookName, notebookTemplate);
-        } finally {
-            if (ssnSession != null && ssnSession.isConnected()) {
-	            ssnSession.disconnect();
-	        }
-        }
-    }
-    
-    //TODO refactor two methods and make one
-	private void executePythonScript2(Session ssnSession, String clusterName, String notebookTestFile,
-									  String notebookName, String notebookTemplate) throws JSchException,
-			InterruptedException {
-        String command;
-        AckStatus status;
-
-        command = String.format(COMMAND_RUN_PYTHON2, ConfigPropertyValue.getClusterOsUser(), notebookTestFile,
-				NamingHelper.getStorageName(), notebookTemplate);
-        LOGGER.info("{}: Executing command {}...", notebookName, command);
-
-        ChannelExec runScript = SSHConnect.setCommand(ssnSession, command);
-        status = SSHConnect.checkAck(runScript);
-        LOGGER.info("{}: Script execution status message {} and code {}", notebookName, status.getMessage(), status.getStatus());
-        assertTrue(status.isOk(), notebookName + ": The python script execution wasn't successful on " + clusterName);
-
-        LOGGER.info("{}: Python script executed successfully ", notebookName);
-    }
-
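-	// Runs the scenario python script on the notebook through a locally forwarded SSH port.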
-	private void executePythonScript(String Ip, String cluster_name, String notebookTestFile, int assignedPort,
-									 String notebookName) throws JSchException, InterruptedException {
-        String command;
-        AckStatus status;
-        Session session = SSHConnect.getForwardedConnect(ConfigPropertyValue.getClusterOsUser(), Ip, assignedPort);
-
-        try {
-            command = String.format(COMMAND_RUN_PYTHON,
-                    "/tmp/" +  notebookTestFile,
-                    NamingHelper.getStorageName(),
-                    cluster_name,
-                    ConfigPropertyValue.getClusterOsUser());
-            LOGGER.info("{}: Executing command {}...", notebookName, command);
-
-            ChannelExec runScript = SSHConnect.setCommand(session, command);
-            status = SSHConnect.checkAck(runScript);
-			LOGGER.info("{}: Script execution status message {} and status code {}", notebookName, status.getMessage(),
-					status.getStatus());
-            assertTrue(status.isOk(), notebookName + ": The python script execution wasn't successful on " + cluster_name);
-
-            LOGGER.info("{}: Python script executed successfully ", notebookName);
-        }
-        finally {
-            if(session != null && session.isConnected()) {
-                LOGGER.info("{}: Closing notebook session", notebookName);
-                session.disconnect();
-            }
-        }
-    }
-
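-	// Copies the scenario script and notebook templates to the SSN node and the notebook instance,
-	// then runs the python scenario over a forwarded port when a cluster is present or not required.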
-	void run2(String ssnIP, String noteBookIp, String clusterName, File notebookScenarioDirectory,
-			  File notebookTemplatesDirectory, String notebookName)
-            throws JSchException, IOException, InterruptedException {
-		LOGGER.info("Python tests for directories {} and {} will be started ...", notebookScenarioDirectory,
-				notebookTemplatesDirectory);
-    	if (ConfigPropertyValue.isRunModeLocal()) {
-    		LOGGER.info("  tests are skipped");
-    		return;
-    	}
-
-		assertTrue(notebookScenarioDirectory.exists(), notebookName + ": Checking notebook scenario directory " +
-				notebookScenarioDirectory);
-        assertTrue(notebookScenarioDirectory.isDirectory());
-
-		assertTrue(notebookTemplatesDirectory.exists(), notebookName + ": Checking notebook templates directory " +
-				notebookTemplatesDirectory);
-        assertTrue(notebookTemplatesDirectory.isDirectory());
-
-        String [] templatesFiles = notebookTemplatesDirectory.list();
-        assertNotNull(templatesFiles, "Notebook " + notebookName + " templates directory is empty!");
-
-    	String [] scenarioFiles = notebookScenarioDirectory.list();
-        assertNotNull(scenarioFiles, "Notebook " + notebookName + " scenario directory is empty!");
-
-		assertEquals(scenarioFiles.length, 1, "The python script location " + notebookScenarioDirectory +
-				" was expected to contain exactly one *.py file, but found: " +
-				Arrays.toString(scenarioFiles));
-        assertTrue(scenarioFiles[0].endsWith(".py"), "The python script was not found");
-        // it is assumed there should be 1 python file.
-        String notebookScenarioTestFile = scenarioFiles[0];
-
-        Session ssnSession = SSHConnect.getSession(ConfigPropertyValue.getClusterOsUser(), ssnIP, 22);
-        try {
-            LOGGER.info("{}: Copying scenario test file to SSN {}...", notebookName, ssnIP);
-			copyFileToSSN(ssnSession, Paths.get(notebookScenarioDirectory.getAbsolutePath(),
-					notebookScenarioTestFile).toString(), "");
-
-        	LOGGER.info("{}: Copying scenario test file to Notebook {}...", notebookName, noteBookIp);
-            copyFileToNotebook(ssnSession, notebookScenarioTestFile, noteBookIp, "");
-
-            LOGGER.info("In notebook templates directory {} available following template files: {}",
-                    notebookTemplatesDirectory, Arrays.toString(templatesFiles));
-
-            if(existsInSSN(ssnSession, NamingHelper.getNotebookTestTemplatesPath(notebookName))){
-				LOGGER.info("{}: Corresponding folder for notebook templates already exists in SSN {} " +
-						"and will be removed ...", notebookName, ssnIP);
-                removeFromSSN(ssnSession, NamingHelper.getNotebookTestTemplatesPath(notebookName).split("/")[0]);
-            }
-
-            LOGGER.info("{}: Creating subfolder in home directory in SSN for copying templates {}...", notebookName, ssnIP);
-            mkDirInSSN(ssnSession, NamingHelper.getNotebookTestTemplatesPath(notebookName));
-
-            LOGGER.info("{}: Copying templates to SSN {}...", notebookName, ssnIP);
-            for(String filename : templatesFiles){
-                copyFileToSSN(ssnSession, Paths.get(notebookTemplatesDirectory.getAbsolutePath(), filename).toString(),
-                        NamingHelper.getNotebookTestTemplatesPath(notebookName));
-            }
-
-            LOGGER.info("{}: Copying templates to Notebook {}...", notebookName, noteBookIp);
-            copyFileToNotebook(ssnSession, NamingHelper.getNotebookTestTemplatesPath(notebookName),
-                        noteBookIp, notebookName);
-
-			if (!clusterName.equalsIgnoreCase(NamingHelper.CLUSTER_ABSENT)
-					|| !NamingHelper.isClusterRequired(notebookName)) {
-				LOGGER.info("{}: Port forwarding from ssn {} to notebook {}...", notebookName, ssnIP, noteBookIp);
-				int assignedPort = ssnSession.setPortForwardingL(0, noteBookIp, 22);
-				LOGGER.info("{}: Port forwarded localhost:{} -> {}:22", notebookName, assignedPort, noteBookIp);
-				executePythonScript(noteBookIp, clusterName, notebookScenarioTestFile, assignedPort, notebookName);
-			}
-        }
-        finally {
-            if(ssnSession != null && ssnSession.isConnected()) {
-                LOGGER.info("{}: Closing ssn session", notebookName);
-                ssnSession.disconnect();
-            }
-        }
-    }
-
-    // Copies file to subfolder of home directory of SSN. If parameter 'destDirectoryInSSN' is empty string then copies
-    // to home directory.
-	private void copyFileToSSN(Session ssnSession, String sourceFilenameWithPath, String destDirectoryInSSN)
-			throws IOException, JSchException {
-        LOGGER.info("Copying {} to SSN...", sourceFilenameWithPath);
-        File file = new File(sourceFilenameWithPath);
-        assertTrue(file.exists(), "Source file " + sourceFilenameWithPath + " doesn't exist!");
-        LOGGER.info("Source file {} exists: {}", sourceFilenameWithPath, file.exists());
-
-        ChannelSftp channelSftp = null;
-        FileInputStream src = new FileInputStream(file);
-        try {
-        	channelSftp = SSHConnect.getChannelSftp(ssnSession);
-			channelSftp.put(src,
-					String.format("/home/%s/%s%s", ConfigPropertyValue.getClusterOsUser(), destDirectoryInSSN, file
-							.getName()));
-        } catch (SftpException e) {
-            LOGGER.error("An error occured during copying file to SSN: {}", e);
-			fail("Copying file " + file.getName() + " to SSN is failed");
-        } finally {
-            if(channelSftp != null && channelSftp.isConnected()) {
-                channelSftp.disconnect();
-            }
-        }
-
-    }
-
-    // Creates a folder in home directory of SSN
-    private void mkDirInSSN(Session ssnSession, String directoryName) throws JSchException {
-        String newDirectoryAbsolutePath = String.format("/home/%s/%s", ConfigPropertyValue.getClusterOsUser(), directoryName);
-        LOGGER.info("Creating directory {} in SSN...", newDirectoryAbsolutePath);
-
-        ChannelSftp channelSftp = null;
-        try {
-            channelSftp = SSHConnect.getChannelSftp(ssnSession);
-            if(!directoryName.equals("")){
-                String[] partsOfPath = directoryName.split("/");
-                StringBuilder sb = new StringBuilder();
-                for(String partOfPath : partsOfPath){
-                    if(partOfPath.equals("")){
-                        continue;
-                    }
-                    sb.append(partOfPath);
-                    if(!existsInSSN(ssnSession, sb.toString())){
-                        LOGGER.info("Creating directory {} in SSN...",
-                                String.format("/home/%s/%s", ConfigPropertyValue.getClusterOsUser(), sb.toString()));
-                        channelSftp.mkdir(String.format("/home/%s/%s", ConfigPropertyValue.getClusterOsUser(), sb.toString()));
-                    }
-                    sb.append("/");
-                }
-            }
-            assertTrue(channelSftp.stat(newDirectoryAbsolutePath).isDir(), "Directory " + newDirectoryAbsolutePath +
-                    " wasn't created in SSN!");
-        } catch (SftpException e) {
-            LOGGER.error("An error occured during creation directory in SSN: {}", e);
-			fail("Creating directory " + newDirectoryAbsolutePath + " in SSN is failed");
-        } finally {
-            if(channelSftp != null && channelSftp.isConnected()) {
-                channelSftp.disconnect();
-            }
-        }
-
-    }
-
-    // Checks if file exists in home directory of SSN
-    private boolean existsInSSN(Session ssnSession, String fileName) throws JSchException {
-        String homeDirectoryAbsolutePath = String.format("/home/%s", ConfigPropertyValue.getClusterOsUser());
-        LOGGER.info("Checking if file/directory {} exists in home directory {} of SSN...", fileName, homeDirectoryAbsolutePath);
-
-        boolean isFileEmbeddedIntoFolder = fileName.contains("/");
-        ChannelSftp channelSftp = null;
-        List<String> fileNames = new ArrayList<>();
-        try {
-            channelSftp = SSHConnect.getChannelSftp(ssnSession);
-            Vector fileDataList = channelSftp.ls(homeDirectoryAbsolutePath);
-            for (Object fileData : fileDataList) {
-                ChannelSftp.LsEntry entry = (ChannelSftp.LsEntry) fileData;
-                fileNames.add(entry.getFilename());
-            }
-            if(fileNames.isEmpty()){
-				LOGGER.info("Does file/directory {} exist in home directory {} of SSN: {}",
-                        fileName, homeDirectoryAbsolutePath, "false");
-                return false;
-            }
-            LOGGER.info("In home directory {} of SSN there are following files: {}",
-                    homeDirectoryAbsolutePath, fileNames);
-            if(!isFileEmbeddedIntoFolder){
-				LOGGER.info("Does file/directory {} exist in home directory {} of SSN: {}",
-                        fileName, homeDirectoryAbsolutePath, fileNames.contains(fileName));
-                return fileNames.contains(fileName);
-            }else{
-                List<String> partsOfPath =
-                        Stream.of(fileName.split("/")).filter(e -> !e.equals("")).collect(Collectors.toList());
-                StringBuilder currentPath = new StringBuilder(homeDirectoryAbsolutePath);
-                for(int i = 0; i < partsOfPath.size(); i++){
-                    String partOfPath = partsOfPath.get(i);
-                    if(fileNames.isEmpty() || !fileNames.contains(partOfPath)){
-						LOGGER.info("Does file/directory {} exist in home directory {} of SSN: {}",
-                                fileName, homeDirectoryAbsolutePath, "false");
-                        return false;
-                    }else{
-                        if(i == partsOfPath.size() - 1){
-							LOGGER.info("Does file/directory {} exist in home directory {} of SSN: {}",
-                                    fileName, homeDirectoryAbsolutePath, "true");
-                            return true;
-                        }
-                        currentPath.append("/").append(partOfPath);
-                        fileDataList = channelSftp.ls(currentPath.toString());
-                        fileNames = new ArrayList<>();
-                        for (Object fileData : fileDataList) {
-                            ChannelSftp.LsEntry entry = (ChannelSftp.LsEntry) fileData;
-                            fileNames.add(entry.getFilename());
-                        }
-
-                    }
-
-                }
-
-            }
-
-        } catch (SftpException e) {
-            LOGGER.error("An error occured during obtaining list of files from home directory in SSN: {}", e);
-        } finally {
-            if(channelSftp != null && channelSftp.isConnected()) {
-                channelSftp.disconnect();
-            }
-        }
-		LOGGER.info("Does file/directory {} exist in home directory {} of SSN: {}",
-                fileName, homeDirectoryAbsolutePath, "false");
-        return false;
-    }
-
-    // Removes file or directory from home directory of SSN
-    private void removeFromSSN(Session ssnSession, String fileNameWithRelativePath) throws JSchException {
-        String absoluteFilePath = String.format("/home/%s/%s", ConfigPropertyValue.getClusterOsUser(), fileNameWithRelativePath);
-
-        ChannelSftp channelSftp = null;
-        try {
-            channelSftp = SSHConnect.getChannelSftp(ssnSession);
-            boolean isDir = channelSftp.stat(absoluteFilePath).isDir();
-            LOGGER.info("Is file {} a directory in SSN: {}", absoluteFilePath, isDir);
-            if(isDir){
-                LOGGER.info("Removing directory {} from SSN...", absoluteFilePath);
-                recursiveDirectoryDelete(ssnSession, absoluteFilePath);
-            }else{
-                LOGGER.info("Removing file {} from SSN...", absoluteFilePath);
-                channelSftp.rm(absoluteFilePath);
-            }
-        } catch (SftpException e) {
-            LOGGER.error("An error occured during removing file {} from SSN: {}", absoluteFilePath, e);
-        } finally {
-            if(channelSftp != null && channelSftp.isConnected()) {
-                channelSftp.disconnect();
-            }
-        }
-    }
-
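-    // Recursively deletes a remote directory over SFTP, removing nested entries before the directory itself.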
-    private void recursiveDirectoryDelete(Session ssnSession, String remoteDir) throws JSchException{
-        ChannelSftp channelSftp = null;
-        try{
-            channelSftp = SSHConnect.getChannelSftp(ssnSession);
-            boolean isDir = channelSftp.stat(remoteDir).isDir();
-            if(isDir){
-                Vector dirList = channelSftp.ls(remoteDir);
-                for(Object fileData : dirList){
-                    ChannelSftp.LsEntry entry = (ChannelSftp.LsEntry) fileData;
-                    if(!(entry.getFilename().equals(".") || entry.getFilename().equals(".."))){
-                        if(entry.getAttrs().isDir()){
-                            recursiveDirectoryDelete(ssnSession, remoteDir + File.separator
-                                    + entry.getFilename() + File.separator);
-                        }
-                        else{
-                            channelSftp.rm(remoteDir + entry.getFilename());
-                        }
-                    }
-                }
-                channelSftp.cd("..");
-                channelSftp.rmdir(remoteDir);
-            }
-        }
-        catch (SftpException e){
-            LOGGER.error("An error occured while deleting directory {}: {}", remoteDir, e.getMessage());
-        }
-        finally {
-            if(channelSftp != null && channelSftp.isConnected()) {
-                channelSftp.disconnect();
-            }
-        }
-    }
-
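-	// Copies a file from the SSN home directory to the notebook instance via scp using the uploaded private key.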
-	private void copyFileToNotebook(Session session, String filename, String ip, String notebookName)
-			throws JSchException, InterruptedException {
-    	String command = String.format(COMMAND_COPY_TO_NOTEBOOK,
-    			"keys/"+ Paths.get(ConfigPropertyValue.getAccessKeyPrivFileName()).getFileName().toString(),
-                filename,
-                ConfigPropertyValue.getClusterOsUser(),
-                ip,
-                NamingHelper.getNotebookType(notebookName));
-
-    	LOGGER.info("Copying {} to notebook...", filename);
-    	LOGGER.info("  Run command: {}", command);
-
-        ChannelExec copyResult = SSHConnect.setCommand(session, command);
-        AckStatus status = SSHConnect.checkAck(copyResult);
-
-        LOGGER.info("Copied {}: {}", filename, status.toString());
-        assertTrue(status.isOk());
-    }
-
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/TestServices.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/TestServices.java
deleted file mode 100644
index 9ee67b0..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/TestServices.java
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test;
-
-import com.epam.dlab.automation.cloud.VirtualMachineStatusChecker;
-import com.epam.dlab.automation.docker.Docker;
-import com.epam.dlab.automation.helper.*;
-import com.epam.dlab.automation.http.ApiPath;
-import com.epam.dlab.automation.http.ContentType;
-import com.epam.dlab.automation.http.HttpRequest;
-import com.epam.dlab.automation.http.HttpStatusCode;
-import com.epam.dlab.automation.jenkins.JenkinsService;
-import com.epam.dlab.automation.model.Lib;
-import com.epam.dlab.automation.model.LoginDto;
-import com.epam.dlab.automation.model.NotebookConfig;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.jayway.restassured.RestAssured;
-import com.jayway.restassured.response.Response;
-import com.jayway.restassured.response.ResponseBody;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.TimeUnit;
-
-import static org.testng.Assert.fail;
-
-@Test(singleThreaded = true)
-public class TestServices {
-
-	private final static Logger LOGGER = LogManager.getLogger(TestServices.class);
-	// Currently 3 notebooks are tested in parallel, so 3 threads are used;
-	// the remaining threads (e.g. for restartNotebookAndRedeployToTerminate) are a pool
-	// for future notebook growth. This is also needed to investigate Amazon's behaviour
-	// when the same IAM user requests sets of computational resources in parallel:
-	// running the test in 1 thread mostly succeeds, while 2 or more threads usually fail.
-	private static final int N_THREADS = 10;
-	private static final long NOTEBOOK_CREATION_DELAY = 60000;
-
-	private long testTimeMillis;
-	private List<NotebookConfig> notebookConfigs;
-	private List<Lib> skippedLibs;
-
-
-	@BeforeClass
-	public void setup() throws IOException {
-		testTimeMillis = System.currentTimeMillis();
-		// Load properties
-		ConfigPropertyValue.getJenkinsJobURL();
-
-		ObjectMapper mapper = new ObjectMapper();
-		notebookConfigs = mapper.readValue(ConfigPropertyValue.getNotebookTemplates(),
-				new TypeReference<ArrayList<NotebookConfig>>() {
-				});
-		skippedLibs = mapper.readValue(ConfigPropertyValue.getSkippedLibs(),
-				new TypeReference<ArrayList<Lib>>() {
-				});
-	}
-
-	@AfterClass
-	public void cleanup() {
-		testTimeMillis = System.currentTimeMillis() - testTimeMillis;
-		LOGGER.info("Test time {} ms", testTimeMillis);
-	}
-
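-	// End-to-end flow: locate the Jenkins job, verify SSN login, upload the user key and run notebook scenarios.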
-	@Test
-	public void runTest() throws Exception {
-		testJenkinsJob();
-		testLoginSsnService();
-
-		RestAssured.baseURI = NamingHelper.getSsnURL();
-		NamingHelper.setSsnToken(ssnLoginAndKeyUpload());
-		runTestsInNotebooks();
-	}
-
-	private void testJenkinsJob() throws Exception {
-		/*
-		 * LOGGER.info("1. Jenkins Job will be started ...");
-		 *
-		 * JenkinsService jenkins = new
-		 * JenkinsService(ConfigPropertyValue.getJenkinsUsername(),
-		 * ConfigPropertyValue.getJenkinsPassword()); String buildNumber =
-		 * jenkins.runJenkinsJob(ConfigPropertyValue.getJenkinsJobURL());
-		 * LOGGER.info("   Jenkins Job has been completed");
-		 */
-
-		LOGGER.info("1. Looking for last Jenkins Job ...");
-		JenkinsService jenkins = new JenkinsService();
-		String buildNumber = jenkins.getJenkinsJob();
-		LOGGER.info("   Jenkins Job found:");
-		LOGGER.info("Build number is: {}", buildNumber);
-
-		NamingHelper.setSsnURL(jenkins.getSsnURL().replaceAll(" ", ""));
-		NamingHelper.setServiceBaseName(jenkins.getServiceBaseName().replaceAll(" ", ""));
-		Assert.assertNotNull(NamingHelper.getSsnURL(), "Jenkins URL was not generated");
-		Assert.assertNotNull(NamingHelper.getServiceBaseName(), "Service BaseName was not generated");
-		LOGGER.info("Self-Service URL is: " + NamingHelper.getSsnURL());
-		LOGGER.info("ServiceBaseName is: " + NamingHelper.getServiceBaseName());
-	}
-
-	private ResponseBody<?> login(String username, String password, int expectedStatusCode, String errorMessage) {
-		final String ssnLoginURL = NamingHelper.getSelfServiceURL(ApiPath.LOGIN);
-		LoginDto requestBody = new LoginDto(username, password);
-		Response response = new HttpRequest().webApiPost(ssnLoginURL, ContentType.JSON, requestBody);
-		LOGGER.info("   login response body for user {} is {}", username, response.getBody().asString());
-		Assert.assertEquals(response.statusCode(), expectedStatusCode, errorMessage);
-		return response.getBody();
-	}
-
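-	// Verifies the SSN virtual machine is running and the service is available, then checks
-	// login/logout behaviour including invalid-credential cases.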
-	private void testLoginSsnService() throws Exception {
-
-		String cloudProvider = ConfigPropertyValue.getCloudProvider();
-
-		LOGGER.info("Check status of SSN node on {}: {}", cloudProvider.toUpperCase(), NamingHelper.getSsnName());
-
-		String publicSsnIp = CloudHelper.getInstancePublicIP(NamingHelper.getSsnName(), true);
-		LOGGER.info("Public IP is: {}", publicSsnIp);
-		String privateSsnIp = CloudHelper.getInstancePrivateIP(NamingHelper.getSsnName(), true);
-		LOGGER.info("Private IP is: {}", privateSsnIp);
-		if (publicSsnIp == null || privateSsnIp == null) {
-			Assert.fail("There is not any virtual machine in " + cloudProvider + " with name " + NamingHelper.getSsnName());
-			return;
-		}
-		NamingHelper.setSsnIp(PropertiesResolver.DEV_MODE ? publicSsnIp : privateSsnIp);
-		VirtualMachineStatusChecker.checkIfRunning(NamingHelper.getSsnName(), true);
-		LOGGER.info("{} instance state is running", cloudProvider.toUpperCase());
-
-		LOGGER.info("2. Waiting for SSN service ...");
-		Assert.assertTrue(WaitForStatus.selfService(ConfigPropertyValue.getTimeoutSSNStartup()),
-				"SSN service was not started");
-		LOGGER.info("   SSN service is available");
-
-		LOGGER.info("3. Check login");
-		final String ssnLoginURL = NamingHelper.getSelfServiceURL(ApiPath.LOGIN);
-		LOGGER.info("   SSN login URL is {}", ssnLoginURL);
-
-		ResponseBody<?> responseBody;
-		// TODO Choose username and password for this check
-		// if (!ConfigPropertyValue.isRunModeLocal()) {
-		// responseBody = login(ConfigPropertyValue.getNotIAMUsername(),
-		// ConfigPropertyValue.getNotIAMPassword(),
-		// HttpStatusCode.UNAUTHORIZED, "Unauthorized user " +
-		// ConfigPropertyValue.getNotIAMUsername());
-		// Assert.assertEquals(responseBody.asString(), "Please contact AWS
-		// administrator to create corresponding IAM User");
-		// }
-
-		responseBody = login(ConfigPropertyValue.getNotDLabUsername(), ConfigPropertyValue.getNotDLabPassword(),
-				HttpStatusCode.UNAUTHORIZED, "Unauthorized user " + ConfigPropertyValue.getNotDLabUsername());
-
-		Assert.assertEquals(responseBody.path("message"), "Username or password are not valid");
-
-		if (!ConfigPropertyValue.isRunModeLocal()) {
-			responseBody = login(ConfigPropertyValue.getUsername(), ".", HttpStatusCode.UNAUTHORIZED,
-					"Unauthorized user " + ConfigPropertyValue.getNotDLabUsername());
-			Assert.assertEquals(responseBody.path("message"), "Username or password are not valid");
-		}
-
-		LOGGER.info("Logging in with credentials {}/***", ConfigPropertyValue.getUsername());
-		responseBody = login(ConfigPropertyValue.getUsername(), ConfigPropertyValue.getPassword(), HttpStatusCode.OK,
-				"User login " + ConfigPropertyValue.getUsername() + " was not successful");
-
-		LOGGER.info("4. Check logout");
-		final String ssnlogoutURL = NamingHelper.getSelfServiceURL(ApiPath.LOGOUT);
-		LOGGER.info("   SSN logout URL is {}", ssnlogoutURL);
-
-		Response responseLogout = new HttpRequest().webApiPost(ssnlogoutURL, ContentType.ANY);
-		LOGGER.info("responseLogout.statusCode() is {}", responseLogout.statusCode());
-		Assert.assertEquals(responseLogout.statusCode(), HttpStatusCode.UNAUTHORIZED,
-				"User log out was not successful"/*
-				 * Replace to HttpStatusCode.OK when EPMCBDCCSS-938 will be fixed
-				 * and merged
-				 */);
-	}
-
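-	// Logs in, uploads the user access key if it is not present yet, waits for the edge node and returns the token.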
-	private String ssnLoginAndKeyUpload() throws Exception {
-		LOGGER.info("5. Login as {} ...", ConfigPropertyValue.getUsername());
-		final String ssnLoginURL = NamingHelper.getSelfServiceURL(ApiPath.LOGIN);
-		final String ssnUploadKeyURL = NamingHelper.getSelfServiceURL(ApiPath.UPLOAD_KEY);
-		LOGGER.info("   SSN login URL is {}", ssnLoginURL);
-		LOGGER.info("   SSN upload key URL is {}", ssnUploadKeyURL);
-
-		ResponseBody<?> responseBody = login(ConfigPropertyValue.getUsername(), ConfigPropertyValue.getPassword(),
-				HttpStatusCode.OK, "Failed to login");
-		String token = responseBody.asString();
-		LOGGER.info("   Logged in. Obtained token: {}", token);
-
-		LOGGER.info("5.a Checking for user Key...");
-		Response respCheckKey = new HttpRequest().webApiGet(ssnUploadKeyURL, token);
-
-		if (respCheckKey.getStatusCode() == HttpStatusCode.NOT_FOUND) {
-			LOGGER.info("5.b Upload Key will be started ...");
-
-			Response respUploadKey = new HttpRequest().webApiPost(ssnUploadKeyURL, ContentType.FORMDATA, token);
-			LOGGER.info("   respUploadKey.getBody() is {}", respUploadKey.getBody().asString());
-
-			Assert.assertEquals(respUploadKey.statusCode(), HttpStatusCode.OK, "The key uploading was not successful");
-			int responseCodeAccessKey = WaitForStatus.uploadKey(ssnUploadKeyURL, token, HttpStatusCode.ACCEPTED,
-					ConfigPropertyValue.getTimeoutUploadKey());
-			LOGGER.info("   Upload Key has been completed");
-			LOGGER.info("responseAccessKey.statusCode() is {}", responseCodeAccessKey);
-			Assert.assertEquals(responseCodeAccessKey, HttpStatusCode.OK, "The key uploading was not successful");
-		} else if (respCheckKey.getStatusCode() == HttpStatusCode.OK) {
-			LOGGER.info("   Key has been uploaded already");
-		} else {
-			Assert.assertEquals(respCheckKey.getStatusCode(), HttpStatusCode.OK, "Failed to check user key.");
-		}
-
-		final String nodePrefix = ConfigPropertyValue.getUsernameSimple();
-		Docker.checkDockerStatus(nodePrefix + "_create_edge_", NamingHelper.getSsnIp());
-
-		VirtualMachineStatusChecker.checkIfRunning(NamingHelper.getEdgeName(), true);
-
-		final String ssnExpEnvURL = NamingHelper.getSelfServiceURL(ApiPath.EXP_ENVIRONMENT);
-		LOGGER.info("   SSN exploratory environment URL is {}", ssnExpEnvURL);
-		final String ssnProUserResURL = NamingHelper.getSelfServiceURL(ApiPath.PROVISIONED_RES);
-		LOGGER.info("   SSN provisioned user resources URL is {}", ssnProUserResURL);
-
-		return token;
-	}
-
-	private void populateNotebookConfigWithSkippedLibs(NotebookConfig notebookCfg) {
-		if (Objects.isNull(notebookCfg.getSkippedLibraries())) {
-			notebookCfg.setSkippedLibraries(skippedLibs);
-		}
-	}
-
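-	// Runs each notebook config as a TestCallable in a fixed thread pool and polls until all scenarios finish.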
-	private void runTestsInNotebooks() throws Exception {
-
-		ExecutorService executor = Executors.newFixedThreadPool(
-				ConfigPropertyValue.getExecutionThreads() > 0 ? ConfigPropertyValue.getExecutionThreads() : N_THREADS);
-		notebookConfigs.forEach(this::populateNotebookConfigWithSkippedLibs);
-		List<FutureTask<Boolean>> futureTasks = new ArrayList<>();
-		if (CloudProvider.GCP_PROVIDER.equals(ConfigPropertyValue.getCloudProvider())) {
-			LOGGER.debug("Image creation tests are skipped for all types of notebooks in GCP.");
-			notebookConfigs.forEach(config -> config.setImageTestRequired(false));
-		}
-		LOGGER.info("Testing the following notebook configs: {}", notebookConfigs);
-		for (NotebookConfig notebookConfig : notebookConfigs) {
-			if (!ConfigPropertyValue.isRunModeLocal() &&
-					CloudProvider.AZURE_PROVIDER.equals(ConfigPropertyValue.getCloudProvider())) {
-				LOGGER.debug("Waiting " + NOTEBOOK_CREATION_DELAY / 1000 + " sec to start notebook creation...");
-				TimeUnit.SECONDS.sleep(NOTEBOOK_CREATION_DELAY / 1000);
-			}
-			FutureTask<Boolean> runScenarioTask = new FutureTask<>(new TestCallable(notebookConfig));
-			futureTasks.add(runScenarioTask);
-			executor.execute(runScenarioTask);
-		}
-		final long checkThreadTimeout = ConfigPropertyValue.isRunModeLocal() ? 1000 : 5000;
-		while (true) {
-			boolean done = allScenariosDone(futureTasks);
-			if (done) {
-				verifyResults(futureTasks);
-				executor.shutdown();
-				return;
-			} else {
-				TimeUnit.SECONDS.sleep(checkThreadTimeout / 1000);
-			}
-		}
-	}
-
-	private void verifyResults(List<FutureTask<Boolean>> futureTasks) {
-		List<Exception> resExceptions = new ArrayList<>();
-		for (FutureTask<Boolean> ft : futureTasks) {
-			try {
-				ft.get();
-			} catch (Exception exception) {
-				resExceptions.add(exception);
-			}
-		}
-
-		if (!resExceptions.isEmpty()) {
-			for (Exception exception : resExceptions) {
-				LOGGER.error("Notebook test failed with exception:", exception);
-			}
-			fail(resExceptions.size() + " of " + futureTasks.size()
-					+ " notebook tests failed, see stacktraces above.");
-		}
-	}
-
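-	// Returns true once every notebook scenario task has completed.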
-	private boolean allScenariosDone(List<FutureTask<Boolean>> futureTasks) {
-		for (FutureTask<Boolean> ft : futureTasks) {
-			if (!ft.isDone()) {
-				return false;
-			}
-		}
-		return true;
-	}
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/LibraryNotFoundException.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/LibraryNotFoundException.java
deleted file mode 100644
index b8fca93..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/LibraryNotFoundException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs;
-
-class LibraryNotFoundException extends RuntimeException {
-
-	private static final long serialVersionUID = 1L;
-
-	LibraryNotFoundException(String message) {
-		super(message);
-	}
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/LibsHelper.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/LibsHelper.java
deleted file mode 100644
index 471679c..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/LibsHelper.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs;
-
-import com.epam.dlab.automation.helper.NamingHelper;
-
-import static com.epam.dlab.automation.helper.NamingHelper.*;
-
-public class LibsHelper {
-
-	private static final String LIB_GROUPS_JSON = "lib_groups.json";
-	private static final String LIB_LIST_JSON = "lib_list.json";
-
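-    // Resolves the lib_groups.json path for the notebook type inferred from the notebook name.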
-    public static String getLibGroupsPath(String notebookName){
-		if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(DEEPLEARNING))) {
-			return DEEPLEARNING + "/" + LIB_GROUPS_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(JUPYTER))) {
-			return JUPYTER + "/" + LIB_GROUPS_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(RSTUDIO))) {
-			return RSTUDIO + "/" + LIB_GROUPS_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(TENSOR))) {
-			return TENSOR + "/" + LIB_GROUPS_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(ZEPPELIN))) {
-			return ZEPPELIN + "/" + LIB_GROUPS_JSON;
-		} else return LIB_GROUPS_JSON;
-    }
-
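-    // Resolves the lib_list.json path for the notebook type inferred from the notebook name.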
-    public static String getLibListPath(String notebookName){
-		if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(DEEPLEARNING))) {
-			return DEEPLEARNING + "/" + LIB_LIST_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(JUPYTER))) {
-			return JUPYTER + "/" + LIB_LIST_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(RSTUDIO))) {
-			return RSTUDIO + "/" + LIB_LIST_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(TENSOR))) {
-			return TENSOR + "/" + LIB_LIST_JSON;
-		} else if (notebookName.contains(NamingHelper.getSimpleNotebookNames().get(ZEPPELIN))) {
-			return ZEPPELIN + "/" + LIB_LIST_JSON;
-		} else return LIB_LIST_JSON;
-    }
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibGroupStep.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibGroupStep.java
deleted file mode 100644
index 57d56d4..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibGroupStep.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs;
-
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.epam.dlab.automation.helper.NamingHelper;
-import com.epam.dlab.automation.http.HttpRequest;
-import com.epam.dlab.automation.http.HttpStatusCode;
-import com.epam.dlab.automation.model.JsonMapperDto;
-import com.jayway.restassured.response.Response;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-@TestDescription("Test \"Show available library groups\" ")
-public class TestLibGroupStep extends TestLibStep {
-    private static final Logger LOGGER = LogManager.getLogger(TestLibGroupStep.class);
-    private List<String> expectedGroups;
-
-    public TestLibGroupStep(String url, String token, String notebookName, long initTimeout, String jsonFilePath) {
-        super(NamingHelper.getSelfServiceURL(url), token, notebookName, initTimeout);
-        this.expectedGroups = JsonMapperDto.readListOf(jsonFilePath, String.class);
-    }
-
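-    // Polls the lib groups endpoint until a non-empty list of groups is returned or the timeout expires.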
-    @Override
-    public void init() throws InterruptedException {
-
-        long currentTime = System.currentTimeMillis() / 1000L;
-        long expiredTime = currentTime + initTimeoutSec;
-
-        while (expiredTime > currentTime) {
-            HttpRequest httpRequest = new HttpRequest();
-            
-            Map<String, Object> params = new HashMap<>();
-            params.put("exploratory_name", notebookName);
-			Response groups = httpRequest.webApiGet(url, token, params);
-            if (groups.getStatusCode() != HttpStatusCode.OK) {
-                LOGGER.error("Response status {}, body {}", groups.getStatusCode(), groups.getBody().print());
-                Assert.fail("Cannot get lib groups " + notebookName);
-            } else {
-                List<String> availableGroups = groups.getBody().jsonPath().getList("", String.class);
-
-                if (availableGroups == null || availableGroups.isEmpty()) {
-                    LOGGER.info("Init lib group. Wait for time out {} seconds left for {}", expiredTime - currentTime, notebookName);
-                    TimeUnit.SECONDS.sleep(ConfigPropertyValue.isRunModeLocal() ? 3L : 20L);
-                } else {
-                    return;
-                }
-            }
-
-            currentTime = System.currentTimeMillis() / 1000L;
-        }
-
-        Assert.fail("Timeout Cannot get lib groups " + notebookName);
-    }
-
-    @Override
-    public void verify() {
-        HttpRequest httpRequest = new HttpRequest();
-        
-        Map<String, Object> params = new HashMap<>();
-        params.put("exploratory_name", notebookName);
-		Response response = httpRequest.webApiGet(url, token, params);
-        if (response.getStatusCode() == HttpStatusCode.OK) {
-            List<String> availableGroups = response.getBody().jsonPath().getList("", String.class);
-
-            LOGGER.info("Expected groups {}", expectedGroups);
-
-            LOGGER.info("Available groups {}", availableGroups);
-
-            for (String lib : expectedGroups) {
-                Assert.assertTrue(availableGroups.contains(lib), String.format("%s lib group is not available for %s", lib, notebookName));
-            }
-
-        } else {
-            LOGGER.error("Response status {}, body {}", response.getStatusCode(), response.getBody().print());
-            Assert.fail("Lib group request failed for " + notebookName);
-        }
-
-        LOGGER.info(getDescription() + "passed");
-    }
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibInstallStep.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibInstallStep.java
deleted file mode 100644
index 9b9d521..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibInstallStep.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs;
-
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.epam.dlab.automation.helper.NamingHelper;
-import com.epam.dlab.automation.http.ContentType;
-import com.epam.dlab.automation.http.HttpRequest;
-import com.epam.dlab.automation.http.HttpStatusCode;
-import com.epam.dlab.automation.model.Lib;
-import com.epam.dlab.automation.test.libs.models.LibInstallRequest;
-import com.epam.dlab.automation.test.libs.models.LibStatusResponse;
-import com.epam.dlab.automation.test.libs.models.LibraryStatus;
-import com.jayway.restassured.response.Response;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-
-@TestDescription("Test \"Install libraries\" ")
-public class TestLibInstallStep extends TestLibStep {
-    private final static Logger LOGGER = LogManager.getLogger(TestLibInstallStep.class);
-    private String statusUrl;
-    private Lib libToInstall;
-	private boolean isInstalled = true;
-
-	public TestLibInstallStep(String requestUrl, String statusUrl, String token, String notebookName, long
-			initTimeoutSec,
-							  Lib libToInstall) {
-
-        super(NamingHelper.getSelfServiceURL(requestUrl), token, notebookName, initTimeoutSec);
-        this.statusUrl = NamingHelper.getSelfServiceURL(statusUrl);
-        this.libToInstall = libToInstall;
-    }
-
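-    // Submits the install request, then polls the status endpoint until no library is left in
-    // 'installing' state or the timeout expires.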
-    @Override
-    public void init() throws InterruptedException {
-        LibInstallRequest request = new LibInstallRequest(Collections.singletonList(libToInstall), notebookName);
-
-        LOGGER.info("Install lib {}", request);
-
-        long currentTime = System.currentTimeMillis() / 1000L;
-        long expiredTime = currentTime + initTimeoutSec;
-
-        Response response = new HttpRequest().webApiPost(url, ContentType.JSON, request, token);
-        if (response.getStatusCode() != HttpStatusCode.OK) {
-            LOGGER.error("Response status {}, body {}", response.getStatusCode(), response.getBody().print());
-            Assert.fail("Cannot install libs for " + request);
-        }
-
-        while (expiredTime > currentTime) {
-
-            HttpRequest httpRequest = new HttpRequest();
-            Map<String,Object> params = new HashMap<>();
-            params.put("exploratory_name", notebookName);
-            response = httpRequest.webApiGet(statusUrl, token,params);
-            if (response.getStatusCode() == HttpStatusCode.OK) {
-
-                List<LibStatusResponse> actualStatuses = Arrays.asList(response.getBody().as(LibStatusResponse[].class));
-
-                LOGGER.info("Actual statuses {}", actualStatuses);
-
-                LibStatusResponse s = actualStatuses.stream()
-                        .filter(e -> e.getGroup().equals(libToInstall.getGroup())
-                                && e.getName().equals(libToInstall.getName())
-                                && (e.getVersion().equals(libToInstall.getVersion()) || "N/A".equals(libToInstall.getVersion())))
-						.findFirst().orElseThrow(() -> new LibraryNotFoundException(String.format("Library " +
-										"template with parameters: group=%s, name=%s, version=%s not found.",
-								libToInstall.getGroup(), libToInstall.getName(), libToInstall.getVersion())));
-
-                LOGGER.info("Lib status is {}", s);
-                
-                boolean allLibStatusesDone = true;
-
-                for (LibraryStatus libStatus : s.getStatus()) {
-                    if ("installing".equals(libStatus.getStatus())) {
-                        allLibStatusesDone = false;
-                    }
-                }
-                if (!allLibStatusesDone) {
-                    LOGGER.info("Waiting, {} sec left for lib installation {}", expiredTime - currentTime, request);
-                    TimeUnit.SECONDS.sleep(ConfigPropertyValue.isRunModeLocal() ? 3L : 20L);
-                } else {
-                    return;
-                }
-                
-            } else {
-                LOGGER.error("Response status{}, body {}", response.getStatusCode(), response.getBody().print());
-                Assert.fail("Install libs failed for " + notebookName);
-            }
-
-            currentTime = System.currentTimeMillis() / 1000L;
-        }
-
-        Assert.fail("Timeout Cannot install libs on " + notebookName + " " + request);
-    }
-
-    @Override
-    public void verify() {
-        HttpRequest httpRequest = new HttpRequest();
-        Map<String,Object> params = new HashMap<>();
-        params.put("exploratory_name", notebookName);
-        Response response = httpRequest.webApiGet(statusUrl, token,params);
-        if (response.getStatusCode() == HttpStatusCode.OK) {
-
-            List<LibStatusResponse> actualStatuses = Arrays.asList(response.getBody().as(LibStatusResponse[].class));
-            LOGGER.info("Actual statuses {}", actualStatuses);
-
-            LibStatusResponse libStatusResponse = actualStatuses.stream()
-                    .filter(e -> e.getGroup().equals(libToInstall.getGroup())
-                            && e.getName().equals(libToInstall.getName())
-                            && (e.getVersion().equals(libToInstall.getVersion()) || "N/A".equals(libToInstall.getVersion())))
-					.findFirst().orElseThrow(() -> new LibraryNotFoundException(String.format("Library " +
-									"template with parameters: group=%s, name=%s, version=%s not found.",
-							libToInstall.getGroup(), libToInstall.getName(), libToInstall.getVersion())));
-
-            for (LibraryStatus libStatus : libStatusResponse.getStatus()) {
-            	if ("installed".equals(libStatus.getStatus())) {
-                    LOGGER.info("Library status of {} is {}", libToInstall, libStatusResponse);
-                } else if ("failed".equals(libStatus.getStatus())) {
-                    LOGGER.warn("Failed status with proper error message happend for {}", libStatusResponse);
-					isInstalled = false;
-                } else {
-					Assert.assertEquals("installed", libStatus.getStatus(), "Lib " + libToInstall + " is not " +
-							"installed" +
-							". Status " + libStatusResponse);
-                }
-			}
-        } else {
-            LOGGER.error("Response status{}, body {}", response.getStatusCode(), response.getBody().print());
-            Assert.fail("Install libs failed for " + notebookName);
-        }
-        LOGGER.info(getDescription() + "passed");
-    }
-
-	public boolean isLibraryInstalled() {
-		return isInstalled;
-	}
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibListStep.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibListStep.java
deleted file mode 100644
index 89566c2..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibListStep.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs;
-
-import com.epam.dlab.automation.helper.ConfigPropertyValue;
-import com.epam.dlab.automation.helper.NamingHelper;
-import com.epam.dlab.automation.http.ContentType;
-import com.epam.dlab.automation.http.HttpRequest;
-import com.epam.dlab.automation.http.HttpStatusCode;
-import com.epam.dlab.automation.model.Lib;
-import com.epam.dlab.automation.test.libs.models.LibSearchRequest;
-import com.epam.dlab.automation.test.libs.models.LibToSearchData;
-import com.jayway.restassured.response.Response;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.testng.Assert;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-@TestDescription("Test \"Search libraries by group and prefix\" ")
-public class TestLibListStep extends TestLibStep {
-	private static final Logger LOGGER = LogManager.getLogger(TestLibListStep.class);
-	private LibToSearchData libToSearchData;
-	private List<Lib> libs = new ArrayList<>();
-
-	public TestLibListStep(String url, String token, String notebookName, long initTimeoutSec,
-						   LibToSearchData libToSearchData) {
-		super(NamingHelper.getSelfServiceURL(url), token, notebookName, initTimeoutSec);
-		this.libToSearchData = libToSearchData;
-	}
-
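-	// Polls the search endpoint until at least one library matching the group and prefix is returned
-	// or the timeout expires.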
-	@Override
-	public void init() throws InterruptedException {
-		LibSearchRequest request = new LibSearchRequest(notebookName, libToSearchData.getGroup(),
-				libToSearchData.getStartWith());
-
-		long currentTime = System.currentTimeMillis() / 1000L;
-		long expiredTime = currentTime + initTimeoutSec;
-
-		while (expiredTime > currentTime) {
-			Response response = new HttpRequest().webApiPost(url, ContentType.JSON, request, token);
-			LOGGER.info("Request libraries {}", request);
-
-			if (response.getStatusCode() != HttpStatusCode.OK) {
-				LOGGER.error("Response status {}, body {}", response.getStatusCode(), response.getBody().print());
-				Assert.fail("Cannot get lib list for " + request);
-			} else {
-				Map<String, String> foundLibs =
-						getLibMap(response);
-				if (foundLibs == null || foundLibs.isEmpty()) {
-					LOGGER.info("Init lib list. Wait for time out {} seconds left for {}", expiredTime - currentTime,
-							notebookName);
-					TimeUnit.SECONDS.sleep(ConfigPropertyValue.isRunModeLocal() ? 3L : 20L);
-				} else {
-					return;
-				}
-			}
-
-			currentTime = System.currentTimeMillis() / 1000L;
-		}
-
-		Assert.fail("Timeout Cannot get lib list " + notebookName);
-	}
-
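-	// Converts the JSON array response into a map of library name to version.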
-	private Map<String, String> getLibMap(Response response) {
-		return response.getBody().jsonPath().getList("")
-				.stream()
-				.collect(Collectors.toMap(o -> (String) ((Map) o).get("name"),
-						o -> (String) ((Map) o).get("version")));
-	}
-
-	@Override
-	public void verify() {
-		Map<String, String> actualFoundLibs = new HashMap<>();
-
-		LibSearchRequest request = new LibSearchRequest(notebookName, libToSearchData.getGroup(),
-				libToSearchData.getStartWith());
-		Response response = new HttpRequest().webApiPost(url, ContentType.JSON, request, token);
-		LOGGER.info("Request libraries {}", request);
-		if (response.getStatusCode() == HttpStatusCode.OK) {
-			actualFoundLibs = getLibMap(response);
-			if (actualFoundLibs.isEmpty()) {
-				Assert.fail("Libraries not found");
-			} else {
-				LOGGER.info("Found libraries for {} are {}", request, actualFoundLibs);
-				for (Map.Entry<String, String> entry : actualFoundLibs.entrySet()) {
-					Assert.assertTrue(entry.getKey().toLowerCase().startsWith(libToSearchData.getStartWith().toLowerCase()),
-							String.format("Nor expected lib is found %s-%s", entry.getKey(), entry.getValue()));
-				}
-				LOGGER.info("Libraries are verified");
-			}
-
-		} else {
-			LOGGER.error("Response {}", response);
-			Assert.fail("Lib list request failed for " + request);
-		}
-		LOGGER.info(getDescription() + "passed");
-
-		for (Map.Entry<String, String> entry : actualFoundLibs.entrySet()) {
-			libs.add(new Lib(libToSearchData.getGroup(), entry.getKey(), entry.getValue()));
-		}
-	}
-
-	public List<Lib> getLibs() {
-		return libs;
-	}
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibStep.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibStep.java
deleted file mode 100644
index 5930f77..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestLibStep.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs;
-
-import java.lang.annotation.Annotation;
-import java.util.concurrent.TimeUnit;
-
-abstract class TestLibStep {
-    final String url;
-    final String token;
-    final String notebookName;
-    final long initTimeoutSec; //seconds
-
-    TestLibStep(String url, String token, String notebookName, long initTimeoutSec) {
-        this.url = url;
-        this.token = token;
-        this.notebookName = notebookName;
-        this.initTimeoutSec = initTimeoutSec;
-    }
-
-    public abstract void verify();
-
-    String getDescription() {
-        Annotation annotation = getClass().getAnnotation(TestDescription.class);
-        return (annotation != null) ? ((TestDescription) annotation).value() : "";
-    }
-
-    public void init() throws InterruptedException {
-        if (initTimeoutSec != 0L) {
-            TimeUnit.SECONDS.sleep(initTimeoutSec);
-        }
-    }
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibInstallRequest.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibInstallRequest.java
deleted file mode 100644
index ad48b07..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibInstallRequest.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs.models;
-
-import com.epam.dlab.automation.model.Lib;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
-import java.util.List;
-
-
-public class LibInstallRequest {
-	@JsonProperty
-	private List<Lib> libs;
-	@JsonProperty("exploratory_name")
-	private String notebookName;
-
-	public LibInstallRequest(List<Lib> libs, String notebookName) {
-		this.libs = libs;
-		this.notebookName = notebookName;
-	}
-
-	public List<Lib> getLibs() {
-		return libs;
-	}
-
-	public String getNotebookName() {
-		return notebookName;
-	}
-
-	@Override
-	public String toString() {
-		return MoreObjects.toStringHelper(this)
-				.add("libs", libs)
-				.add("notebookName", notebookName)
-				.toString();
-	}
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibSearchRequest.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibSearchRequest.java
deleted file mode 100644
index 45ffa32..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibSearchRequest.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs.models;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
-/**
- * Created by yu on 7/3/17.
- */
-public class LibSearchRequest {
-    @JsonProperty("exploratory_name")
-    private String notebookName;
-    @JsonProperty
-    private String group;
-    @JsonProperty("start_with")
-    private String startWith;
-
-    public LibSearchRequest() {
-    }
-
-    public LibSearchRequest(String notebookName, String group, String startWith) {
-        this.notebookName = notebookName;
-        this.group = group;
-        this.startWith = startWith;
-    }
-
-    @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this)
-                .add("notebookName", notebookName)
-                .add("group", group)
-                .add("startWith", startWith)
-                .toString();
-    }
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibStatusResponse.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibStatusResponse.java
deleted file mode 100644
index cf79d82..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibStatusResponse.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs.models;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
-import java.util.List;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-public class LibStatusResponse {
-    @JsonProperty
-    private String group;
-    @JsonProperty
-    private String name;
-    @JsonProperty
-    private String version;
-    @JsonProperty
-    private List<LibraryStatus> status;
-
-    public String getGroup() {
-        return group;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public String getVersion() {
-        return version;
-    }
-
-    public List<LibraryStatus> getStatus() {
-        return status;
-    }
-
-
-    @Override
-    public boolean equals(Object o) {
-		if (this == o) return true;
-		if (o == null || getClass() != o.getClass()) return false;
-
-		LibStatusResponse that = (LibStatusResponse) o;
-
-		return (group != null ? group.equals(that.group) : that.group == null) && (name != null ? name.equals(that
-				.name) : that.name == null) && (version != null ? version.equals(that.version) : that.version == null)
-				&& (status != null ? status.equals(that.status) : that.status == null);
-	}
-
-    @Override
-    public int hashCode() {
-        int result = group != null ? group.hashCode() : 0;
-        result = 31 * result + (name != null ? name.hashCode() : 0);
-        result = 31 * result + (version != null ? version.hashCode() : 0);
-        result = 31 * result + (status != null ? status.hashCode() : 0);
-        return result;
-    }
-
-    @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this)
-                .add("group", group)
-                .add("name", name)
-                .add("version", version)
-                .add("status", status)
-                .toString();
-    }
-}
-
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibToSearchData.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibToSearchData.java
deleted file mode 100644
index e6aa205..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibToSearchData.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs.models;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class LibToSearchData {
-    @JsonProperty
-    private String group;
-    @JsonProperty("start_with")
-    private String startWith;
-
-    public String getGroup() {
-        return group;
-    }
-
-    public String getStartWith() {
-        return startWith;
-    }
-}
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibraryStatus.java b/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibraryStatus.java
deleted file mode 100644
index 1be3139..0000000
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/models/LibraryStatus.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.automation.test.libs.models;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
-public class LibraryStatus {
-	    @JsonProperty
-	    private String resource;
-	    @JsonProperty
-	    private String resourceType;
-	    @JsonProperty
-	    private String status;
-	    @JsonProperty
-	    private String error;
-	    
-		public String getResource() {
-			return resource;
-		}
-	    public String getResourceType() { return resourceType;}
-	    public String getStatus() {
-			return status;
-		}
-		public String getError() {
-			return error;
-		}
-		
-		@Override
-		public int hashCode() {
-			final int prime = 31;
-			int result = 1;
-			result = prime * result + ((error == null) ? 0 : error.hashCode());
-			result = prime * result + ((resource == null) ? 0 : resource.hashCode());
-			result = prime * result + ((status == null) ? 0 : status.hashCode());
-			result = prime * result + ((resourceType == null) ? 0 : resourceType.hashCode());
-			return result;
-		}
-		@Override
-		public boolean equals(Object obj) {
-			if (this == obj)
-				return true;
-			if (obj == null)
-				return false;
-			if (getClass() != obj.getClass())
-				return false;
-			LibraryStatus other = (LibraryStatus) obj;
-			if (error == null) {
-				if (other.error != null)
-					return false;
-			} else if (!error.equals(other.error))
-				return false;
-			if (resource == null) {
-				if (other.resource != null)
-					return false;
-			} else if (!resource.equals(other.resource))
-				return false;
-			if (status == null) {
-				if (other.status != null)
-					return false;
-			} else if (!status.equals(other.status))
-				return false;
-			if (resourceType == null) {
-				return other.resourceType == null;
-			} else return resourceType.equals(other.resourceType);
-		}
-		@Override
-		public String toString() {
-			return MoreObjects.toStringHelper(this)
-					.add("resource", resource)
-					.add("resourceType", resourceType)
-					.add("status", status)
-					.add("error", error)
-					.toString();
-		}
-	    
-	    
-}
diff --git a/integration-tests/src/test/resources/log4j2.xml b/integration-tests/src/test/resources/log4j2.xml
deleted file mode 100644
index 91d23a2..0000000
--- a/integration-tests/src/test/resources/log4j2.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~   http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing,
-  ~ software distributed under the License is distributed on an
-  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  ~ KIND, either express or implied.  See the License for the
-  ~ specific language governing permissions and limitations
-  ~ under the License.
-  -->
-
-<Configuration>
-
-	<appender name="console" class="org.apache.log4j.ConsoleAppender">
-		<layout class="org.apache.log4j.PatternLayout">
-			<param name="ConversionPattern"
-				   value="%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n" />
-		</layout>
-	</appender>
-
-	<appender name="file" class="org.apache.log4j.FileAppender">
-
-		<param name="file" value="FILE.log"/>
-		<param name="immediateFlush" value="true"/>
-		<param name="threshold" value="debug"/>
-		<param name="append" value="false"/>
-
-		<layout class="org.apache.log4j.PatternLayout">
-			<param name="conversionPattern" value="%m%n"/>
-		</layout>
-	</appender>
-
-	<Appenders>
-		<Console name="console" target="SYSTEM_OUT">
-			<PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
-		</Console>
-			<File name="file" fileName="output.log" bufferedIO="false" advertiseURI="file:log.log" advertise="true">
-			</File>
-	</Appenders>
-
-
-	<Loggers>
-		<Root level="info">
-			<AppenderRef ref="file" />
-			<AppenderRef ref="console" />
-		</Root>
-		<Logger name="com.epam.dlab.automation" level="debug" additivity="false">
-			<AppenderRef ref="file" />
-			<AppenderRef ref="console" />
-    	</Logger>
-	</Loggers>
-
-</Configuration>
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 853ef15..46b9616 100644
--- a/pom.xml
+++ b/pom.xml
@@ -74,7 +74,7 @@
         <com.google.inject.version>4.2.0</com.google.inject.version>
         <dropwizard-template-config.version>1.4.0</dropwizard-template-config.version>
         <com.aegisql.conveyor.version>1.1.7</com.aegisql.conveyor.version>
-        <org.mongodb.version>3.3.0</org.mongodb.version>
+        <org.mongodb.version>3.8.2</org.mongodb.version>
         <junit.version>4.12</junit.version>
         <org.mockito.version>1.10.19</org.mockito.version>
         <java.version>1.8</java.version>
@@ -240,6 +240,7 @@
                         <exclude>**/*.ipynb</exclude>
                         <exclude>**/*.iml</exclude>
                         <exclude>**/*.json</exclude>
+                        <exclude>**/*.json.tpl</exclude>
                         <exclude>**/*.r</exclude>
                         <exclude>**/__init__.py</exclude>
                         <exclude>**/*.conf</exclude>
diff --git a/services/billing-aws/Dockerfile b/services/billing-aws/Dockerfile
index 66d2812..b41a919 100644
--- a/services/billing-aws/Dockerfile
+++ b/services/billing-aws/Dockerfile
@@ -23,6 +23,6 @@
 
 USER root
 
-COPY billing-aws-2.1.jar /root/
+COPY billing-aws-2.2.jar /root/
 
-CMD java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 /root/billing-aws-2.1.jar --conf /root/billing.yml
\ No newline at end of file
+CMD java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 /root/billing-aws-2.2.jar --conf /root/billing.yml
\ No newline at end of file
diff --git a/services/billing-aws/billing.yml b/services/billing-aws/billing.yml
index ee70487..3b1943f 100644
--- a/services/billing-aws/billing.yml
+++ b/services/billing-aws/billing.yml
@@ -26,43 +26,40 @@
 
 billingEnabled: true
 
-host: localhost
+host: MONGO_HOST
 port: 27017
 username: admin
-password: <MONGODB_PASSWORD>
+password: MONGO_PASSWORD
 database: dlabdb
 
-scheduler:
-# Schedule is comma separated values of time in format hh[:mm[:ss]]. hh - in the 24-hour clock, at 8:15PM is 20:15.
-  schedule: 0:00, 3:00, 6:00, 9:00, 12:00, 15:00, 18:00, 21:00
-
 # Adapter for reading source data. Known types: file, s3file
 adapterIn:
   - type: s3file
-    bucket: <BILLING_BUCKET_NAME>
-    path: <REPORT_PATH>
-    awsJobEnabled: <AWS_JOB_ENABLED>
-    accountId: <ACCOUNT_ID>
-    accessKeyId: <ACCESS_KEY_ID>
-    secretAccessKey: <SECRET_ACCESS_KEY>
+    bucket: BILLING_BUCKET_NAME
+    path: REPORT_PATH
+    awsJobEnabled: AWS_JOB_ENABLED
+    accountId: ACCOUNT_ID
+    accessKeyId: ACCESS_KEY_ID
+    secretAccessKey: SECRET_ACCESS_KEY
 
 # Adapter for writing converted data. Known types: console, file, s3file, mongodb
 adapterOut:
   - type: mongodlab
-    host: localhost
+    host: MONGO_HOST
     port: 27017
     username: admin
-    password: <MONGODB_PASSWORD>
+    password: MONGO_PASSWORD
     database: dlabdb
 #    bufferSize: 10000
     upsert: true
+    serviceBaseName: SERVICE_BASE_NAME
 
 # Filter for source and converted data.
 filter:
   - type: aws
     currencyCode: USD
-    columnDlabTag: <CONF_BILLING_TAG>
-    serviceBaseName: <CONF_SERVICE_BASE_NAME>
+    columnDlabTag: CONF_BILLING_TAG
+    serviceBaseName: SERVICE_BASE_NAME
 
 
 # Parser of source data to common format.
@@ -71,9 +68,9 @@
     headerLineNo: 1
     skipLines: 1
     columnMapping: >-
-      dlab_id=<DLAB_ID>;usage_date=<USAGE_DATE>;product=<PRODUCT>;
-      usage_type=<USAGE_TYPE>;usage=<USAGE>;cost=<COST>;
-      resource_id=<RESOURCE_ID>;tags=<TAGS>
+      dlab_id=DLAB_ID;usage_date=USAGE_DATE;product=PRODUCT;
+      usage_type=USAGE_TYPE;usage=USAGE;cost=COST;
+      resource_id=RESOURCE_ID;tags=TAGS
     aggregate: day
 
 
@@ -94,4 +91,4 @@
       currentLogFilename: /var/opt/dlab/log/ssn/billing.log
       archive: true
       archivedLogFilenamePattern: /var/opt/dlab/log/ssn/billing-%d{yyyy-MM-dd}.log.gz
-      archivedFileCount: 10
+      archivedFileCount: 10
\ No newline at end of file
diff --git a/services/billing-aws/pom.xml b/services/billing-aws/pom.xml
index a411c20..ec4c830 100644
--- a/services/billing-aws/pom.xml
+++ b/services/billing-aws/pom.xml
@@ -28,7 +28,7 @@
         <version>1.0</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
-    
+
     <artifactId>billing-aws</artifactId>
 
     <properties>
@@ -37,6 +37,25 @@
         <org.freemarker.version>2.3.22</org.freemarker.version>
     </properties>
 
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-dependencies</artifactId>
+                <version>2.1.3.RELEASE</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.keycloak.bom</groupId>
+                <artifactId>keycloak-adapter-bom</artifactId>
+                <version>4.8.3.Final</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
     <dependencies>
         <dependency>
             <groupId>com.epam.dlab</groupId>
@@ -64,11 +83,6 @@
             <version>${org.freemarker.version}</version>
         </dependency>
         <dependency>
-            <groupId>com.epam.dlab</groupId>
-            <artifactId>dlab-model</artifactId>
-            <version>${project.parent.version}</version>
-        </dependency>
-        <dependency>
             <groupId>javax.validation</groupId>
             <artifactId>validation-api</artifactId>
             <version>2.0.0.Final</version>
@@ -119,80 +133,62 @@
             <artifactId>guava</artifactId>
             <version>24.1-jre</version>
         </dependency>
+
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-configuration-processor</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-data-mongodb</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-web</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-security</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.keycloak</groupId>
+            <artifactId>keycloak-spring-boot-starter</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.keycloak</groupId>
+            <artifactId>keycloak-spring-security-adapter</artifactId>
+            <version>4.8.3.Final</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+
         <dependency>
             <groupId>org.mockito</groupId>
             <artifactId>mockito-core</artifactId>
             <version>${org.mockito.version}</version>
             <scope>test</scope>
         </dependency>
-
+        <dependency>
+            <groupId>com.epam.dlab</groupId>
+            <artifactId>dlab-model</artifactId>
+            <version>${project.parent.version}</version>
+        </dependency>
 
     </dependencies>
     <build>
         <plugins>
-             <plugin>
-                <artifactId>maven-shade-plugin</artifactId>
-                <version>${maven-shade-plugin.version}</version>
+            <plugin>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-maven-plugin</artifactId>
                 <executions>
                     <execution>
-                        <phase>package</phase>
                         <goals>
-                            <goal>shade</goal>
+                            <goal>repackage</goal>
                         </goals>
-                        <configuration>
-                            <createDependencyReducedPom>false</createDependencyReducedPom>
-                            <minimizeJar>false</minimizeJar>
-                            <filters>
-                                <filter>
-                                    <artifact>org.hibernate:hibernate-validator</artifact>
-                                    <includes>**</includes>
-                                </filter>
-                                <filter>
-                                    <artifact>org.glassfish.web:javax.el</artifact>
-                                    <includes>**</includes>
-                                </filter>
-                                <filter>
-                                    <artifact>org.jboss.logging:jboss-logging</artifact>
-                                    <includes>**</includes>
-                                </filter>
-                                <filter>
-                                    <artifact>com.fasterxml:classmate</artifact>
-                                    <includes>**</includes>
-                                </filter>
-
-                                <filter>
-                                    <artifact>javax.validation:validation-api</artifact>
-                                    <includes>**</includes>
-                                </filter>
-                                <filter>
-                                    <artifact>commons-logging:commons-logging</artifact>
-                                    <includes>**</includes>
-                                </filter>
-                                <filter>
-                                    <artifact>com.fasterxml.jackson.core:jackson-databind</artifact>
-                                    <includes>**</includes>
-                                </filter>
-                            </filters>
-                            <transformers>
-                                <transformer
-                                    	implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
-                                <transformer
-                                    	implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
-                                    <mainClass>com.epam.dlab.BillingScheduler</mainClass>
-									<manifestEntries>
-										<Created-By>&lt;EPAM&gt; Systems</Created-By>
-										<Name>com/epam/dlab</Name>
-										<Implementation-Title>DLab Billing Tool</Implementation-Title>
-										<Implementation-Version>${dlab.version}</Implementation-Version>
-										<Implementation-Vendor>&lt;EPAM&gt; Systems</Implementation-Vendor>
-										<Build-Time>${maven.build.timestamp}</Build-Time>
-										<Build-OS>${os.name}</Build-OS>
-										<GIT-Branch>${scmBranch}</GIT-Branch>
-										<GIT-Commit>${buildNumber}</GIT-Commit>
-									</manifestEntries>
-                                </transformer>
-                            </transformers>
-                        </configuration>
                     </execution>
                 </executions>
             </plugin>
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/BillingAwsApplication.java b/services/billing-aws/src/main/java/com/epam/dlab/BillingAwsApplication.java
new file mode 100644
index 0000000..c878370
--- /dev/null
+++ b/services/billing-aws/src/main/java/com/epam/dlab/BillingAwsApplication.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab;
+
+import com.epam.dlab.exceptions.InitializationException;
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.data.mongodb.repository.config.EnableMongoRepositories;
+
+@SpringBootApplication
+@EnableMongoRepositories
+@EnableConfigurationProperties
+public class BillingAwsApplication {
+
+    public static void main(String[] args) throws InitializationException {
+        SpringApplication.run(BillingAwsApplication.class, args);
+        BillingServiceImpl.startApplication(args);
+    }
+}
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/BillingScheduler.java b/services/billing-aws/src/main/java/com/epam/dlab/BillingScheduler.java
deleted file mode 100644
index 5db8269..0000000
--- a/services/billing-aws/src/main/java/com/epam/dlab/BillingScheduler.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab;
-
-import com.epam.dlab.configuration.BillingToolConfiguration;
-import com.epam.dlab.configuration.BillingToolConfigurationFactory;
-import com.epam.dlab.configuration.SchedulerConfiguration;
-import com.epam.dlab.core.parser.ParserBase;
-import com.epam.dlab.exceptions.AdapterException;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.exceptions.InitializationException;
-import com.epam.dlab.exceptions.ParseException;
-import com.epam.dlab.util.ServiceUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Arrays;
-
-/**
- * Billing scheduler for loading billing report.
- */
-public class BillingScheduler implements Runnable {
-	private static final Logger LOGGER = LoggerFactory.getLogger(BillingScheduler.class);
-
-	/**
-	 * Timeout for check the schedule in milliseconds.
-	 */
-	private static final long CHECK_TIMEOUT_MILLIS = 60000;
-
-	/**
-	 * Billing scheduler instance.
-	 */
-	private static BillingScheduler scheduler;
-	private final boolean enabled;
-	private final BillingToolConfiguration configuration;
-
-	/**
-	 * Starts the scheduler for given configuration.
-	 *
-	 * @param filename the name of file for billing configuration.
-	 * @throws InitializationException
-	 */
-	public static void start(String filename) throws InitializationException {
-		if (scheduler == null) {
-			scheduler = new BillingScheduler(filename);
-			scheduler.thread.start();
-		} else {
-			LOGGER.debug("Billing scheduler already started");
-		}
-	}
-
-	/**
-	 * Stops the scheduler.
-	 */
-	public static void stop() {
-		if (scheduler.thread != null) {
-			LOGGER.debug("Billing scheduler will be stopped ...");
-			synchronized (scheduler.thread) {
-				scheduler.thread.interrupt();
-				scheduler.thread = null;
-			}
-			LOGGER.info("Scheduler has been stopped");
-		}
-	}
-
-
-	/**
-	 * Thread of the scheduler.
-	 */
-	private Thread thread = new Thread(this, this.getClass().getSimpleName());
-
-	/**
-	 * Name of configuration file.
-	 */
-	private final String confFilename;
-
-	/**
-	 * Current schedule.
-	 */
-	private SchedulerConfiguration schedule;
-
-	/**
-	 * Instantiate billing scheduler for given configuration.
-	 *
-	 * @param filename the name of file for billing configuration.
-	 * @throws InitializationException
-	 */
-	public BillingScheduler(String filename) throws InitializationException {
-		this.confFilename = filename;
-		LOGGER.debug("Billing report configuration file: {}", filename);
-		configuration = BillingToolConfigurationFactory.build(confFilename, BillingToolConfiguration.class);
-		this.enabled = configuration.isBillingEnabled();
-		setSchedule(configuration);
-	}
-
-	/**
-	 * Loads the billing report.
-	 *
-	 * @throws InitializationException
-	 * @throws AdapterException
-	 * @throws ParseException
-	 */
-	private void load() throws InitializationException, AdapterException, ParseException {
-		ParserBase parser = configuration.build();
-		long time = schedule.getNearTime().getTimeInMillis();
-		if (setSchedule(configuration)) {
-			if (time != schedule.getNearTime().getTimeInMillis()) {
-				LOGGER.info("Previous billing schedule has been canceled");
-				return;
-			}
-		}
-
-		LOGGER.info("Try to laod billing report for configuration: {}", configuration);
-		parser.parse();
-		if (!parser.getStatistics().isEmpty()) {
-			LOGGER.info("Billing report parser statistics:");
-			for (int i = 0; i < parser.getStatistics().size(); i++) {
-				LOGGER.info("  {}", parser.getStatistics().get(i).toString());
-			}
-		}
-	}
-
-	/**
-	 * Read the schedule from configuration.
-	 *
-	 * @param configuration the billing configuration.
-	 * @return <b>true>/b> if new schedule was loaded, otherwise <b>false</b>.
-	 * @throws InitializationException
-	 */
-	private boolean setSchedule(BillingToolConfiguration configuration) throws InitializationException {
-		SchedulerConfiguration schedulerConfiguration = configuration.getScheduler();
-		boolean isModified = false;
-		if (schedulerConfiguration == null) {
-			throw new InitializationException(String.format("Schedule of billing report in configuration file \"%s " +
-					"not found", confFilename));
-		}
-		if (this.schedule == null) {
-			isModified = true;
-			LOGGER.debug("Billing report schedule: {}", schedulerConfiguration);
-		} else {
-			this.schedule.adjustStartTime();
-			if (!schedulerConfiguration.equals(this.schedule)) {
-				isModified = true;
-				LOGGER.debug("New billing report schedule has been loaded: {}", schedulerConfiguration);
-			}
-		}
-
-		try {
-			this.schedule = new SchedulerConfiguration();
-			this.schedule.setSchedule(schedulerConfiguration.getSchedule());
-			this.schedule.build();
-		} catch (Exception e) {
-			throw new InitializationException("Cannot configure billing scheduler. " + e.getLocalizedMessage(), e);
-		}
-
-		return isModified;
-	}
-
-	@Override
-	public void run() {
-		if (enabled) {
-			LOGGER.info("Billing scheduler has been started");
-			long startTimeMillis = schedule.getNextTime().getTimeInMillis();
-			long timeMillis;
-			LOGGER.info("Billing report will be loaded at {}", schedule.getNextTime().getTime());
-
-			try {
-				while (!Thread.currentThread().isInterrupted()) {
-					if (startTimeMillis <= System.currentTimeMillis()) {
-						try {
-							LOGGER.debug("Try to load billing report for schedule {}",
-									schedule.getNextTime().getTime());
-							load();
-						} catch (InitializationException | AdapterException | ParseException e) {
-							LOGGER.error("Error loading billing report: {}", e.getLocalizedMessage(), e);
-						}
-						startTimeMillis = schedule.getNextTime().getTimeInMillis();
-						LOGGER.info("Billing report will be loaded at {}", schedule.getNextTime().getTime());
-					} else {
-						schedule.adjustStartTime();
-						timeMillis = schedule.getNextTime().getTimeInMillis();
-						if (startTimeMillis != timeMillis) {
-							LOGGER.info("Billing report will be loaded at {}", schedule.getNextTime().getTime());
-							startTimeMillis = timeMillis;
-						}
-					}
-
-					try {
-						timeMillis = startTimeMillis - System.currentTimeMillis();
-						if (timeMillis > 0) {
-							timeMillis = Math.min(CHECK_TIMEOUT_MILLIS, timeMillis);
-							Thread.sleep(timeMillis);
-						}
-					} catch (InterruptedException e) {
-						LOGGER.warn("Billing scheduler interrupted", e);
-						Thread.currentThread().interrupt();
-					}
-				}
-			} catch (Exception e) {
-				LOGGER.error("Unhandled billing report error: {}", e.getLocalizedMessage(), e);
-			}
-			LOGGER.info("Scheduler has been stopped");
-		} else {
-			LOGGER.info("Billing scheduler is disabled");
-		}
-	}
-
-
-	/**
-	 * Runs billing scheduler for given configuration file.
-	 *
-	 * @param args the arguments of command line.
-	 * @throws InitializationException
-	 */
-	public static void main(String[] args) throws InitializationException {
-		if (ServiceUtils.printAppVersion(BillingTool.class, args)) {
-			return;
-		}
-
-		String confName = null;
-		for (int i = 0; i < args.length; i++) {
-			if (BillingTool.isKey("help", args[i])) {
-				i++;
-				Help.usage(i < args.length ? Arrays.copyOfRange(args, i, args.length) : null);
-				return;
-			} else if (BillingTool.isKey("conf", args[i])) {
-				i++;
-				if (i < args.length) {
-					confName = args[i];
-				} else {
-					throw new InitializationException("Missing the name of configuration file");
-				}
-			} else {
-				throw new InitializationException("Unknow argument: " + args[i]);
-			}
-		}
-
-		if (confName == null) {
-			Help.usage();
-			throw new InitializationException("Missing arguments");
-		}
-
-		BillingTool.setLoggerLevel();
-		try {
-			start(confName);
-		} catch (Exception e) {
-			throw new DlabException("Billing scheduler failed", e);
-		}
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java b/services/billing-aws/src/main/java/com/epam/dlab/BillingService.java
similarity index 82%
copy from integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
copy to services/billing-aws/src/main/java/com/epam/dlab/BillingService.java
index 1e49a60..9b4d6db 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/BillingService.java
@@ -17,7 +17,12 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.docker;
+package com.epam.dlab;
 
-class Labels {
+import com.epam.dlab.dto.billing.BillingData;
+
+import java.util.List;
+
+public interface BillingService {
+    List<BillingData> getBillingData();
 }
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/BillingServiceImpl.java b/services/billing-aws/src/main/java/com/epam/dlab/BillingServiceImpl.java
new file mode 100644
index 0000000..8ac6c48
--- /dev/null
+++ b/services/billing-aws/src/main/java/com/epam/dlab/BillingServiceImpl.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab;
+
+import com.epam.dlab.configuration.BillingToolConfiguration;
+import com.epam.dlab.configuration.BillingToolConfigurationFactory;
+import com.epam.dlab.core.parser.ParserBase;
+import com.epam.dlab.dto.billing.BillingData;
+import com.epam.dlab.exceptions.DlabException;
+import com.epam.dlab.exceptions.InitializationException;
+import com.epam.dlab.util.ServiceUtils;
+import org.bson.Document;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Service;
+
+import java.time.LocalDate;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+import static com.epam.dlab.model.aws.ReportLine.FIELD_COST;
+import static com.epam.dlab.model.aws.ReportLine.FIELD_CURRENCY_CODE;
+import static com.epam.dlab.model.aws.ReportLine.FIELD_DLAB_ID;
+import static com.epam.dlab.model.aws.ReportLine.FIELD_PRODUCT;
+import static com.epam.dlab.model.aws.ReportLine.FIELD_RESOURCE_TYPE;
+import static com.epam.dlab.model.aws.ReportLine.FIELD_USAGE_DATE;
+
+@Service
+public class BillingServiceImpl implements BillingService {
+	private static final Logger LOGGER = LoggerFactory.getLogger(BillingServiceImpl.class);
+	private static BillingToolConfiguration configuration;
+
+	public List<BillingData> getBillingData() {
+		try {
+			ParserBase parser = configuration.build();
+
+			LOGGER.info("Try to load billing report for configuration: {}", configuration);
+			List<BillingData> billingData = parser.parse()
+					.stream()
+					.map(this::toBillingData)
+					.collect(Collectors.toList());
+
+			if (!parser.getStatistics().isEmpty()) {
+				LOGGER.info("Billing report parser statistics:");
+				for (int i = 0; i < parser.getStatistics().size(); i++) {
+					LOGGER.info("  {}", parser.getStatistics().get(i).toString());
+				}
+			}
+
+			return billingData;
+		} catch (Exception e) {
+			LOGGER.error("Cannot load billing report", e);
+			return Collections.emptyList();
+		}
+	}
+
+	private BillingData toBillingData(Document billingData) {
+		return BillingData.builder()
+				.tag(billingData.getString(FIELD_DLAB_ID).toLowerCase())
+				.usageDateFrom(Optional.ofNullable(billingData.getString(FIELD_USAGE_DATE)).map(LocalDate::parse).orElse(null))
+				.usageDateTo(Optional.ofNullable(billingData.getString(FIELD_USAGE_DATE)).map(LocalDate::parse).orElse(null))
+				.usageDate(billingData.getString(FIELD_USAGE_DATE))
+				.product(billingData.getString(FIELD_PRODUCT))
+				.usageType(billingData.getString(FIELD_RESOURCE_TYPE))
+				.cost(billingData.getDouble(FIELD_COST))
+				.currency(billingData.getString(FIELD_CURRENCY_CODE))
+				.build();
+	}
+
+	public static void initialize(String filename) throws InitializationException {
+		LOGGER.debug("Billing report configuration file: {}", filename);
+		configuration = BillingToolConfigurationFactory.build(filename, BillingToolConfiguration.class);
+	}
+
+	public static void startApplication(String[] args) throws InitializationException {
+		if (ServiceUtils.printAppVersion(BillingTool.class, args)) {
+			return;
+		}
+
+		String confName = null;
+		for (int i = 0; i < args.length; i++) {
+			if (BillingTool.isKey("help", args[i])) {
+				i++;
+				Help.usage(i < args.length ? Arrays.copyOfRange(args, i, args.length) : null);
+				return;
+			} else if (BillingTool.isKey("conf", args[i])) {
+				i++;
+				if (i < args.length) {
+					confName = args[i];
+				} else {
+					throw new InitializationException("Missing the name of configuration file");
+				}
+			}
+		}
+
+		if (confName == null) {
+			Help.usage();
+			throw new InitializationException("Missing arguments");
+		}
+
+		BillingTool.setLoggerLevel();
+		try {
+			initialize(confName);
+		} catch (Exception e) {
+			throw new DlabException("Billing scheduler failed", e);
+		}
+	}
+}
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/BillingTool.java b/services/billing-aws/src/main/java/com/epam/dlab/BillingTool.java
index cf2b8d6..cde9d4e 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/BillingTool.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/BillingTool.java
@@ -19,24 +19,22 @@
 
 package com.epam.dlab;
 
-import java.util.Arrays;
-
-import com.epam.dlab.exceptions.DlabException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.LoggerContext;
 import com.epam.dlab.configuration.BillingToolConfiguration;
 import com.epam.dlab.configuration.BillingToolConfigurationFactory;
 import com.epam.dlab.core.parser.ParserBase;
 import com.epam.dlab.exceptions.AdapterException;
+import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.InitializationException;
 import com.epam.dlab.exceptions.ParseException;
 import com.epam.dlab.util.ServiceUtils;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import ch.qos.logback.classic.Level;
-import ch.qos.logback.classic.LoggerContext;
+import java.util.Arrays;
 
 /** Provides billing parser features.
  */
@@ -110,14 +108,14 @@
 	 * @throws InitializationException
 	 */
 	public static void main(String[] args) throws InitializationException {
-		if (ServiceUtils.printAppVersion(BillingScheduler.class, args)) {
+		if (ServiceUtils.printAppVersion(BillingServiceImpl.class, args)) {
 			return;
 		}
 
 		String confName = null;
 		String json = null;
-		
-		for(int i = 0; i < args.length; i++) {
+
+		for (int i = 0; i < args.length; i++) {
 			if (isKey("help", args[i])) {
 				i++;
 				Help.usage(i < args.length ? Arrays.copyOfRange(args, i, args.length) : null);
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/Help.java b/services/billing-aws/src/main/java/com/epam/dlab/Help.java
index 2a043c2..c2fe5c2 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/Help.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/Help.java
@@ -19,18 +19,17 @@
 
 package com.epam.dlab;
 
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.lang3.StringUtils;
-
 import com.epam.dlab.core.BillingUtils;
 import com.epam.dlab.core.ModuleType;
 import com.epam.dlab.exceptions.InitializationException;
 import com.fasterxml.jackson.annotation.JsonClassDescription;
 import com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.commons.lang3.StringUtils;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 /** Print help for billing tool.
  */
@@ -47,12 +46,12 @@
 	private static void printHelp(String resourceName, Map<String, String> substitute) throws InitializationException {
 		List<String> list = BillingUtils.getResourceAsList("/" + Help.class.getName() + "." + resourceName + ".txt");
 		String help = StringUtils.join(list, System.lineSeparator());
-		
+
 		if (substitute == null) {
 			substitute = new HashMap<>();
 		}
-		substitute.put("classname", BillingScheduler.class.getName());
-		
+		substitute.put("classname", BillingServiceImpl.class.getName());
+
 		for (String key : substitute.keySet()) {
 			help = StringUtils.replace(help, "${" + key.toUpperCase() + "}", substitute.get(key));
 		}
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/conf/SecurityConfig.java b/services/billing-aws/src/main/java/com/epam/dlab/conf/SecurityConfig.java
new file mode 100644
index 0000000..dba4086
--- /dev/null
+++ b/services/billing-aws/src/main/java/com/epam/dlab/conf/SecurityConfig.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.conf;
+
+import org.keycloak.adapters.KeycloakConfigResolver;
+import org.keycloak.adapters.springboot.KeycloakSpringBootConfigResolver;
+import org.keycloak.adapters.springsecurity.KeycloakConfiguration;
+import org.keycloak.adapters.springsecurity.authentication.KeycloakAuthenticationProvider;
+import org.keycloak.adapters.springsecurity.config.KeycloakWebSecurityConfigurerAdapter;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.annotation.Bean;
+import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
+import org.springframework.security.config.annotation.web.builders.HttpSecurity;
+import org.springframework.security.core.authority.mapping.SimpleAuthorityMapper;
+import org.springframework.security.core.session.SessionRegistryImpl;
+import org.springframework.security.web.authentication.session.RegisterSessionAuthenticationStrategy;
+import org.springframework.security.web.authentication.session.SessionAuthenticationStrategy;
+
+@KeycloakConfiguration
+class SecurityConfig extends KeycloakWebSecurityConfigurerAdapter {
+
+    @Autowired
+    public void configureGlobal(AuthenticationManagerBuilder auth) {
+        KeycloakAuthenticationProvider keycloakAuthenticationProvider = keycloakAuthenticationProvider();
+        keycloakAuthenticationProvider.setGrantedAuthoritiesMapper(new SimpleAuthorityMapper());
+        auth.authenticationProvider(keycloakAuthenticationProvider);
+    }
+
+    @Bean
+    public KeycloakConfigResolver keycloakConfigResolver() {
+        return new KeycloakSpringBootConfigResolver();
+    }
+
+    @Bean
+    @Override
+    protected SessionAuthenticationStrategy sessionAuthenticationStrategy() {
+        return new RegisterSessionAuthenticationStrategy(new SessionRegistryImpl());
+    }
+
+    @Override
+    protected void configure(HttpSecurity http) throws Exception {
+        super.configure(http);
+        http
+                .anonymous().disable()
+                .authorizeRequests()
+                .anyRequest()
+                .authenticated();
+    }
+}
\ No newline at end of file
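
Note: the new SecurityConfig relies on KeycloakSpringBootConfigResolver, which takes the adapter settings from the Spring Boot configuration instead of a keycloak.json file. A minimal sketch of the expected properties, reusing the UPPER_CASE placeholder convention from billing.yml above (the concrete realm, client and secret values are deployment-specific assumptions, not part of this change):

keycloak:
  realm: KEYCLOAK_REALM_NAME
  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
  resource: KEYCLOAK_CLIENT_NAME
  credentials:
    secret: KEYCLOAK_CLIENT_SECRET
  bearer-only: true
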
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/configuration/BillingToolConfiguration.java b/services/billing-aws/src/main/java/com/epam/dlab/configuration/BillingToolConfiguration.java
index 803d232..420b9e0 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/configuration/BillingToolConfiguration.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/configuration/BillingToolConfiguration.java
@@ -77,13 +77,6 @@
 	private boolean billingEnabled;
 
 	/**
-	 * Working data file name of modules.
-	 */
-	@Valid
-	@JsonProperty
-	private SchedulerConfiguration scheduler = null;
-
-	/**
 	 * Adapter for reading source data.
 	 */
 	@Valid
@@ -136,20 +129,6 @@
 	}
 
 	/**
-	 * Set the scheduler.
-	 */
-	public void setScheduler(SchedulerConfiguration scheduler) {
-		this.scheduler = scheduler;
-	}
-
-	/**
-	 * Return the scheduler.
-	 */
-	public SchedulerConfiguration getScheduler() {
-		return scheduler;
-	}
-
-	/**
 	 * Set the adapter for reading source data.
 	 */
 	public void setAdapterIn(ImmutableList<AdapterBase> adapter) {
@@ -272,14 +251,6 @@
 			f.setModuleData(moduleData);
 		}
 
-		if (scheduler != null) {
-			try {
-				scheduler.build();
-			} catch (Exception e) {
-				throw new InitializationException("Cannot configure billing scheduler. " + e.getLocalizedMessage(), e);
-			}
-		}
-
 		return parser.build(in, out, f);
 	}
 
@@ -295,7 +266,6 @@
 	public ToStringHelper toStringHelper(Object self) {
 		return MoreObjects.toStringHelper(self)
 				.add("moduleData", moduleData)
-				.add("scheduler", scheduler)
 				.add("adapterIn", adapterIn)
 				.add("adapterOut", adapterOut)
 				.add("filter", filter)
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/configuration/SchedulerConfiguration.java b/services/billing-aws/src/main/java/com/epam/dlab/configuration/SchedulerConfiguration.java
deleted file mode 100644
index b0624d6..0000000
--- a/services/billing-aws/src/main/java/com/epam/dlab/configuration/SchedulerConfiguration.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.configuration;
-
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.commons.lang3.StringUtils;
-
-import com.epam.dlab.exceptions.ParseException;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
-
-/** Provides schedule time configuration.
- */
-public class SchedulerConfiguration {
-	
-	/** User's schedule. */
-	@JsonProperty
-	private String schedule = "12, 13:30:23, 18:34, 08:50, 7:80";
-	
-	
-	/** Return the schedule of user.
-	 */
-	public String getSchedule() {
-		return schedule;
-	}
-	
-	/** Set the schedule of user.
-	 */
-	public void setSchedule(String schedule) {
-		this.schedule = schedule;
-	}
-	
-	
-	/** Schedule. */
-	private Map<String, Calendar> realSchedule = new TreeMap<>();
-	
-	/** Build the schedule from user' schedule.
-	 * @throws ParseException
-	 */
-	public void build() throws ParseException {
-		SimpleDateFormat df = new SimpleDateFormat("HH:mm:ss");
-		String [] unitArray = schedule.split(",");
-		realSchedule.clear();
-		for (int i = 0; i < unitArray.length; i++) {
-			Calendar date = Calendar.getInstance();
-			int [] time = getTime(unitArray[i]);
-			try {
-				df.parse(StringUtils.join(time, ':'));
-			} catch (Exception e) {
-				throw new ParseException("Cannot parse date " + unitArray[i] + ". " + e.getLocalizedMessage(), e);
-			}
-			date.clear();
-			date.set(1, 1, 1, time[0], time[1], time[2]);
-			realSchedule.put(df.format(date.getTime()), date);
-		}
-		adjustStartTime();
-	}
-	
-	/** Return the schedule.
-	 */
-	public Map<String, Calendar> getRealSchedule() {
-		return realSchedule;
-	}
-	
-	/** Return time array of user' schedule time.
-	 * @param time the time in format HH:mm:ss.
-	 * @throws ParseException
-	 */
-	private int [] getTime(String time) throws ParseException {
-		String [] timeString = time.trim().split(":");
-		int [] timeInt = new int[3];
-		
-		for (int i = 0; i < timeInt.length; i++) {
-			if (i < timeString.length) {
-				try {
-					timeInt[i] = Integer.parseInt(timeString[i]);
-				} catch (Exception e) {
-					throw new ParseException("Cannot parse date " + time + ". " + e.getLocalizedMessage(), e);
-				}
-			} else {
-				timeInt[i] = 0;
-			}
-		}
-		
-		return timeInt;
-	}
-
-	/** Adjust the time in schedule for current time.
-	 */
-	public void adjustStartTime() {
-		Calendar now = Calendar.getInstance();
-		for(String key : realSchedule.keySet()) {
-			Calendar time = realSchedule.get(key);
-			if (time.before(now)) {
-				time.set(now.get(Calendar.YEAR),
-						now.get(Calendar.MONTH),
-						now.get(Calendar.DAY_OF_MONTH),
-						time.get(Calendar.HOUR_OF_DAY),
-						time.get(Calendar.MINUTE),
-						time.get(Calendar.SECOND));
-				if (time.before(now)) {
-					time.add(Calendar.DAY_OF_MONTH, 1);
-				}
-				realSchedule.put(key, time);
-			}
-		}
-	}
-	
-	/** Return the key of the next start time from the schedule.
-	 */
-	public String getNextTimeKey() {
-		long now = System.currentTimeMillis();
-		String nextKey = null;
-		long nextTime = -1;
-		
-		for(String key : realSchedule.keySet()) {
-			long time = realSchedule.get(key).getTimeInMillis();
-			if ((time >= now && time < nextTime) || nextTime == -1) {
-				nextTime = time;
-				nextKey = key;
-			}
-		}
-		return nextKey;
-	}
-	
-	/** Return the next start time from the schedule.
-	 */
-	public Calendar getNextTime() {
-		String key = getNextTimeKey();
-		return (key == null ? null : realSchedule.get(key));
-	}
-	
-	/** Return the key of the near start time from the schedule to the current time.
-	 */
-	public String getNearTimeKey() {
-		long now = System.currentTimeMillis();
-		String nextKey = null;
-		long nextTime = -1;
-		
-		for(String key : realSchedule.keySet()) {
-			long time = Math.abs(now - realSchedule.get(key).getTimeInMillis());
-			if (time < nextTime || nextTime == -1) {
-				nextTime = time;
-				nextKey = key;
-			}
-		}
-		return nextKey;
-	}
-	
-	/** Return the near start time from the schedule to the current time.
-	 */
-	public Calendar getNearTime() {
-		String key = getNearTimeKey();
-		return (key == null ? null : realSchedule.get(key));
-	}
-	
-	/** Returns a string representation of the object.
-	 * @param self the object to generate the string for (typically this), used only for its class name.
-	 */
-	public ToStringHelper toStringHelper(Object self) {
-		SimpleDateFormat df = new SimpleDateFormat("dd.MM.yyyy HH:mm:ss");
-		ToStringHelper helper = MoreObjects.toStringHelper(self);
-		for(String key : realSchedule.keySet()) {
-			Calendar time = realSchedule.get(key);
-			helper.add(key, df.format(time.getTime()));
-		}
-    	return helper;
-    }
-    
-    @Override
-    public String toString() {
-    	return toStringHelper(this)
-    			.toString();
-    }
-
-	@Override
-	public boolean equals(Object o) {
-		if (this == o) return true;
-		if (!(o instanceof SchedulerConfiguration)) return false;
-
-		SchedulerConfiguration that = (SchedulerConfiguration) o;
-
-		return getRealSchedule() != null ? getRealSchedule().keySet().equals(that.getRealSchedule().keySet())
-				: that.getRealSchedule() == null;
-	}
-
-	@Override
-	public int hashCode() {
-		return getRealSchedule() != null ? getRealSchedule().keySet().hashCode() : 0;
-	}
-}
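
Note on the removed scheduler code above: getNextTimeKey seeds nextTime with the first entry even when that entry lies in the past, so a stale key can win over a later future one. A minimal standalone sketch of the intended selection (names simplified, not the original API):

    import java.util.Calendar;
    import java.util.Map;

    // Simplified sketch, not the original API: pick the earliest schedule entry
    // that is not in the past. The guard below only admits future times, unlike
    // the removed implementation, which admitted the first entry unconditionally.
    public final class NextTimeSketch {
        private NextTimeSketch() {
        }

        public static String nextTimeKey(Map<String, Calendar> schedule) {
            long now = System.currentTimeMillis();
            String nextKey = null;
            long nextTime = -1;
            for (Map.Entry<String, Calendar> e : schedule.entrySet()) {
                long time = e.getValue().getTimeInMillis();
                if (time >= now && (nextTime == -1 || time < nextTime)) {
                    nextTime = time;
                    nextKey = e.getKey();
                }
            }
            return nextKey;
        }
    }
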
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/controller/BillingController.java b/services/billing-aws/src/main/java/com/epam/dlab/controller/BillingController.java
new file mode 100644
index 0000000..deabf44
--- /dev/null
+++ b/services/billing-aws/src/main/java/com/epam/dlab/controller/BillingController.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.controller;
+
+import com.epam.dlab.BillingService;
+import com.epam.dlab.dto.billing.BillingData;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+import java.util.List;
+
+@RestController
+public class BillingController {
+
+    private final BillingService billingService;
+
+    public BillingController(BillingService billingService) {
+        this.billingService = billingService;
+    }
+
+    @GetMapping
+    public ResponseEntity<List<BillingData>> getBilling() {
+        return new ResponseEntity<>(billingService.getBillingData(), HttpStatus.OK);
+    }
+}
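
The new controller depends only on BillingService#getBillingData(). A hypothetical stub for local testing (assuming com.epam.dlab.BillingService is an interface exposing that single method; this class is not part of the commit):

    package com.epam.dlab.controller;

    import com.epam.dlab.BillingService;
    import com.epam.dlab.dto.billing.BillingData;
    import org.springframework.stereotype.Service;

    import java.util.Collections;
    import java.util.List;

    // Hypothetical stub, not part of this commit: with it on the classpath,
    // GET /api/billing (context path per the application.yml added below)
    // answers 200 with an empty JSON array.
    @Service
    public class StaticBillingService implements BillingService {
        @Override
        public List<BillingData> getBillingData() {
            return Collections.emptyList(); // swap in real report data
        }
    }
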
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/core/AdapterBase.java b/services/billing-aws/src/main/java/com/epam/dlab/core/AdapterBase.java
index 1569530..475404d 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/core/AdapterBase.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/core/AdapterBase.java
@@ -24,6 +24,7 @@
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import org.bson.Document;
 
 import java.util.List;
 
@@ -157,9 +158,10 @@
 	 * Write the row of data to adapter.
 	 *
 	 * @param row the row of common format.
+	 * @return the written row as a Mongo document, or {@code null} if the adapter does not produce one
 	 * @throws AdapterException
 	 */
-	public abstract void writeRow(ReportLine row) throws AdapterException;
+	public abstract Document writeRow(ReportLine row) throws AdapterException;
 
 
 	@Override
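
writeRow now returns the org.bson.Document it produced instead of writing purely as a side effect; the console and file adapters below return null. An illustrative helper, not part of this commit, that drains rows through an adapter and keeps the non-null results:

    import com.epam.dlab.core.AdapterBase;
    import com.epam.dlab.exceptions.AdapterException;
    import com.epam.dlab.model.aws.ReportLine;
    import org.bson.Document;

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative helper: collect the documents an output adapter returns,
    // skipping the nulls produced by adapters that only write side effects.
    public final class RowCollector {
        private RowCollector() {
        }

        public static List<Document> collect(AdapterBase out, List<ReportLine> rows)
                throws AdapterException {
            List<Document> collected = new ArrayList<>();
            for (ReportLine row : rows) {
                Document d = out.writeRow(row);
                if (d != null) {
                    collected.add(d);
                }
            }
            return collected;
        }
    }
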
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserBase.java b/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserBase.java
index f9f0eaa..bfd86bc 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserBase.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserBase.java
@@ -19,13 +19,6 @@
 
 package com.epam.dlab.core.parser;
 
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.validation.constraints.NotNull;
-
-import org.apache.commons.lang3.StringUtils;
-
 import com.epam.dlab.core.AdapterBase;
 import com.epam.dlab.core.FilterBase;
 import com.epam.dlab.core.ModuleBase;
@@ -37,6 +30,12 @@
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import org.apache.commons.lang3.StringUtils;
+import org.bson.Document;
+
+import javax.validation.constraints.NotNull;
+import java.util.ArrayList;
+import java.util.List;
 
 /** Abstract module of parser.<br>
  * See description of {@link ModuleBase} how to create your own parser.
@@ -234,13 +233,16 @@
 	 * @throws InitializationException
 	 */
 	public abstract void initialize()  throws InitializationException;
-	
-	/** Parse the source data to common format and write it to output adapter.
+
+	/**
+	 * Parse the source data to common format and write it to output adapter.
+	 *
+	 * @return the list of billing documents produced during parsing
 	 * @throws InitializationException
 	 * @throws AdapterException
 	 * @throws ParseException
 	 */
-	public abstract void parse() throws InitializationException, AdapterException, ParseException;
+	public abstract List<Document> parse() throws InitializationException, AdapterException, ParseException;
 	
 	/** Build parser from given modules.
 	 * @param adapterIn the adapter for reading source data.
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserByLine.java b/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserByLine.java
index 37f2070..d878cb9 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserByLine.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/core/parser/ParserByLine.java
@@ -27,10 +27,12 @@
 import com.epam.dlab.exceptions.ParseException;
 import com.epam.dlab.model.aws.ReportLine;
 import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.bson.Document;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -156,11 +158,13 @@
 	/**
 	 * Parse the source data to common format and write it to output adapter.
 	 *
+	 * @return list of billing data
 	 * @throws InitializationException
 	 * @throws AdapterException
 	 * @throws ParseException
 	 */
-	public void parse() throws InitializationException, AdapterException, ParseException {
+	public List<Document> parse() throws InitializationException, AdapterException, ParseException {
+		List<Document> billingData = new ArrayList<>();
 		try {
 			if (init()) {
 				String line;
@@ -211,14 +215,14 @@
 						if (getAggregate() != AggregateGranularity.NONE) {
 							getAggregator().append(reportLine);
 						} else {
-							getAdapterOut().writeRow(reportLine);
+							billingData.add(getAdapterOut().writeRow(reportLine));
 							getCurrentStatistics().incrRowWritten();
 						}
 					}
 
 					if (getAggregate() != AggregateGranularity.NONE) {
 						for (int i = 0; i < getAggregator().size(); i++) {
-							getAdapterOut().writeRow(getAggregator().get(i));
+							billingData.add(getAdapterOut().writeRow(getAggregator().get(i)));
 							getCurrentStatistics().incrRowWritten();
 						}
 					}
@@ -255,5 +259,6 @@
 		if (getCurrentStatistics() != null) {
 			getCurrentStatistics().stop();
 		}
+		return billingData;
 	}
 }
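
With parse() returning the accumulated documents, the caller takes over delivery of the rows. A hedged usage sketch (construction of the parser via ParserBase.build(...) omitted; the wrapper class is illustrative only):

    import com.epam.dlab.core.parser.ParserBase;
    import org.bson.Document;

    import java.util.List;

    // Hedged usage sketch: parse() performs its own initialization (see init()
    // above) and now hands back the collected rows, so the caller decides where
    // they go, e.g. returning them from a REST endpoint.
    public final class ParseRunner {
        private ParseRunner() {
        }

        public static List<Document> run(ParserBase parser) throws Exception {
            List<Document> billingData = parser.parse();
            System.out.println("Parsed " + billingData.size() + " billing rows");
            return billingData;
        }
    }
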
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterConsole.java b/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterConsole.java
index 59c866d..3bffa79 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterConsole.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterConsole.java
@@ -25,6 +25,7 @@
 import com.epam.dlab.model.aws.ReportLine;
 import com.fasterxml.jackson.annotation.JsonClassDescription;
 import com.fasterxml.jackson.annotation.JsonTypeName;
+import org.bson.Document;
 
 import java.util.List;
 
@@ -84,7 +85,8 @@
 	}
 
 	@Override
-	public void writeRow(ReportLine row) throws AdapterException {
+	public Document writeRow(ReportLine row) throws AdapterException {
 		System.out.println(CommonFormat.rowToString(row));
+		return null;
 	}
 }
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterFile.java b/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterFile.java
index 7fb38f3..dd256eb 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterFile.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/module/AdapterFile.java
@@ -19,15 +19,6 @@
 
 package com.epam.dlab.module;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.List;
-
-import javax.validation.constraints.NotNull;
-
 import com.epam.dlab.core.AdapterBase;
 import com.epam.dlab.core.parser.CommonFormat;
 import com.epam.dlab.exceptions.AdapterException;
@@ -37,6 +28,15 @@
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import org.bson.Document;
+
+import javax.validation.constraints.NotNull;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.List;
 
 /** The adapter for file system.
  */
@@ -137,15 +137,16 @@
 			throw new AdapterException("Cannot write file " + file + ". " + e.getLocalizedMessage(), e);
 		}
 	}
-	
+
 	@Override
-	public void writeRow(ReportLine row) throws AdapterException {
+	public Document writeRow(ReportLine row) throws AdapterException {
 		try {
 			writer.write(CommonFormat.rowToString(row));
 			writer.write(System.lineSeparator());
 		} catch (IOException e) {
 			throw new AdapterException("Cannot write file " + file + ". " + e.getLocalizedMessage(), e);
 		}
+		return null;
 	}
 	
 	
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/module/aws/AdapterS3File.java b/services/billing-aws/src/main/java/com/epam/dlab/module/aws/AdapterS3File.java
index 9dc7e07..0579063 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/module/aws/AdapterS3File.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/module/aws/AdapterS3File.java
@@ -33,6 +33,7 @@
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import org.bson.Document;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -308,7 +309,7 @@
 	}
 
 	@Override
-	public void writeRow(ReportLine row) throws AdapterException {
+	public Document writeRow(ReportLine row) throws AdapterException {
 		throw new AdapterException("Unimplemented method.");
 	}
 
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java b/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java
index a13ed05..db92a80 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java
@@ -31,12 +31,17 @@
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.mongodb.client.MongoCollection;
+import com.mongodb.client.model.UpdateOptions;
 import org.bson.Document;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.TreeSet;
 
+import static com.epam.dlab.mongo.MongoConstants.COLLECTION_SETTINGS;
+import static com.epam.dlab.mongo.MongoConstants.FIELD_SERIVICE_BASE_NAME;
+import static com.mongodb.client.model.Filters.eq;
+
 /**
  * The adapter for file system.
  */
@@ -67,6 +72,17 @@
 	@JsonProperty
 	private boolean upsert = false;
 
+	@JsonProperty
+	private String serviceBaseName;
+
+	public String getServiceBaseName() {
+		return serviceBaseName;
+	}
+
+	public void setServiceBaseName(String serviceBaseName) {
+		this.serviceBaseName = serviceBaseName;
+	}
+
 	/**
 	 * Return the size of buffer for bulk insert.
 	 */
@@ -142,6 +158,7 @@
 				throw new AdapterException("Mode of " + getType() + " adapter may be " + Mode.WRITE + " only.");
 			}
 			connection = new MongoDbConnection(getHost(), getPort(), getDatabase(), getUsername(), getPassword());
+			setServiceBaseName();
 			collection = connection.getCollection(MongoConstants.COLLECTION_BILLING);
 			try {
 				resourceTypeDAO = new DlabResourceTypeDAO(connection);
@@ -158,6 +175,12 @@
 		}
 	}
 
+	private void setServiceBaseName() {
+		connection.getCollection(COLLECTION_SETTINGS)
+				.updateOne(eq("_id", FIELD_SERIVICE_BASE_NAME), new Document("$set", new Document("value", serviceBaseName)),
+						new UpdateOptions().upsert(true));
+	}
+
 	@Override
 	public void close() throws AdapterException {
 		if (connection != null) {
@@ -191,12 +214,12 @@
 	}
 
 	@Override
-	public void writeHeader(List<String> header) throws AdapterException {
+	public void writeHeader(List<String> header) {
 		// Nothing to do
 	}
 
 	@Override
-	public void writeRow(ReportLine row) throws AdapterException {
+	public Document writeRow(ReportLine row) throws AdapterException {
 		Document document;
 		try {
 			document = resourceTypeDAO.transform(row);
@@ -204,20 +227,21 @@
 			throw new AdapterException("Cannot transform report line. " + e.getLocalizedMessage(), e);
 		}
 
-		usageDateList.append(row.getUsageDate());
-		if (upsert) {
-			buffer.add(document);
-			if (buffer.size() >= bufferSize) {
-				connection.upsertRows(collection, buffer, usageDateList);
-			}
-		} else if (bufferSize > 0) {
-			buffer.add(document);
-			if (buffer.size() >= bufferSize) {
-				connection.insertRows(collection, buffer);
-			}
-		} else {
-			connection.insertOne(collection, document);
-		}
+//		usageDateList.append(row.getUsageDate());
+//		if (upsert) {
+//			buffer.add(document);
+//			if (buffer.size() >= bufferSize) {
+//				connection.upsertRows(collection, buffer, usageDateList);
+//			}
+//		} else if (bufferSize > 0) {
+//			buffer.add(document);
+//			if (buffer.size() >= bufferSize) {
+//				connection.insertRows(collection, buffer);
+//			}
+//		} else {
+//			connection.insertOne(collection, document);
+//		}
+		return document;
 	}
 
 	/**
@@ -235,13 +259,6 @@
 		} catch (Exception e) {
 			throw new AdapterException("Cannot update total monthly cost. " + e.getLocalizedMessage(), e);
 		}
-		try {
-			if (months.size() > 0) {
-				resourceTypeDAO.updateExploratoryCost();
-			}
-		} catch (Exception e) {
-			throw new AdapterException("Cannot update total cost of exploratory. " + e.getLocalizedMessage(), e);
-		}
 	}
 
 	@Override
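
The init-time upsert above keeps the service base name readable from the settings collection. The same operation as a standalone sketch against the plain Mongo driver, reusing the constants from MongoConstants:

    import com.mongodb.client.MongoDatabase;
    import com.mongodb.client.model.UpdateOptions;
    import org.bson.Document;

    import static com.epam.dlab.mongo.MongoConstants.COLLECTION_SETTINGS;
    import static com.epam.dlab.mongo.MongoConstants.FIELD_SERIVICE_BASE_NAME;
    import static com.mongodb.client.model.Filters.eq;

    // Standalone sketch of the init-time upsert: create the settings record on
    // first run, overwrite its "value" afterwards, so consumers can always
    // resolve the service base name from Mongo.
    public final class SettingsUpsertSketch {
        private SettingsUpsertSketch() {
        }

        public static void storeBaseName(MongoDatabase db, String serviceBaseName) {
            db.getCollection(COLLECTION_SETTINGS).updateOne(
                    eq("_id", FIELD_SERIVICE_BASE_NAME), // constant name spelling as in MongoConstants
                    new Document("$set", new Document("value", serviceBaseName)),
                    new UpdateOptions().upsert(true));
        }
    }
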
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/mongo/DlabResourceTypeDAO.java b/services/billing-aws/src/main/java/com/epam/dlab/mongo/DlabResourceTypeDAO.java
index 8e56aea..0c95605 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/mongo/DlabResourceTypeDAO.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/mongo/DlabResourceTypeDAO.java
@@ -19,28 +19,28 @@
 
 package com.epam.dlab.mongo;
 
-import com.epam.dlab.billing.BillingCalculationUtils;
-import com.epam.dlab.billing.DlabResourceType;
-import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.exceptions.InitializationException;
 import com.epam.dlab.exceptions.ParseException;
 import com.epam.dlab.model.aws.ReportLine;
 import com.mongodb.client.AggregateIterable;
 import com.mongodb.client.MongoCollection;
-import com.mongodb.client.model.Updates;
-import org.apache.commons.lang3.StringUtils;
 import org.bson.Document;
 import org.bson.conversions.Bson;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.text.SimpleDateFormat;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 
-import static com.mongodb.client.model.Accumulators.*;
-import static com.mongodb.client.model.Aggregates.*;
-import static com.mongodb.client.model.Filters.*;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Accumulators.sum;
+import static com.mongodb.client.model.Aggregates.group;
+import static com.mongodb.client.model.Aggregates.match;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.gte;
+import static com.mongodb.client.model.Filters.lte;
 import static org.apache.commons.lang3.StringUtils.EMPTY;
 
 /**
@@ -48,8 +48,6 @@
  */
 public class DlabResourceTypeDAO implements MongoConstants {
 	private static final Logger LOGGER = LoggerFactory.getLogger(DlabResourceTypeDAO.class);
-	private static final String VOLUME_PRIMARY_SUFFIX = "-volume-primary";
-	private static final String VOLUME_SECONDARY_SUFFIX = "-volume-secondary";
 
 	/**
 	 * Mongo database connection.
@@ -63,11 +61,6 @@
 	private String serviceBaseNameId;
 
 	/**
-	 * Describe all DLab resources: SSN, EDGE, exploratory, computational and buckets.
-	 */
-	private ResourceItemList resourceList;
-
-	/**
 	 * Instantiate DAO for billing resources.
 	 *
 	 * @param connection the connection to Mongo DB.
@@ -76,7 +69,6 @@
 	public DlabResourceTypeDAO(MongoDbConnection connection) throws InitializationException {
 		this.connection = connection;
 		setServiceBaseName();
-		setResourceList();
 	}
 
 	/**
@@ -110,129 +102,6 @@
 	}
 
 	/**
-	 * Return DLab resources from Mongo DB.
-	 *
-	 * @throws InitializationException
-	 */
-	public ResourceItemList getResourceList() {
-		return resourceList;
-	}
-
-	/**
-	 * Load and return DLab resources from Mongo DB.
-	 *
-	 * @throws InitializationException
-	 */
-	private void setResourceList() {
-		resourceList = new ResourceItemList();
-
-		// Add SSN
-		String sbName = getServiceBaseName();
-		resourceList.append(sbName + "-ssn", "SSN", DlabResourceType.SSN);
-		resourceList.append(sbName + "-ssn-volume-primary", "SSN volume", DlabResourceType.VOLUME);
-		resourceList.append(sbName + "-ssn-bucket", "SSN bucket", DlabResourceType.SSN_BUCKET);
-
-		// collaboration bucket
-		resourceList.append(sbName + "-shared-bucket", "Collaboration bucket", DlabResourceType
-				.COLLABORATION_BUCKET);
-
-		// Add PROJECTS
-		Bson projection = fields(include("name", "endpoints"));
-		Iterable<Document> docs = connection.getCollection("Projects").find().projection(projection);
-		for (Document d : docs) {
-			String projectName = d.getString("name");
-			((List<Document>) d.get("endpoints"))
-					.stream()
-					.map(endpoint -> endpoint.getString("name"))
-					.forEach(endpoint -> {
-						resourceList.append(sbName + "-" + projectName + "-" + endpoint + "-edge", "EDGE Node",
-								DlabResourceType.EDGE, null, null, projectName);
-						resourceList.append(sbName + "-" + projectName+ "-" + endpoint + "-bucket", "Project bucket",
-								DlabResourceType.COLLABORATION_BUCKET, null, null, projectName);
-						resourceList.append(sbName + "-" + projectName+ "-" + endpoint + "-edge-volume-primary",
-								"EDGE Volume", DlabResourceType.VOLUME, null, null, projectName);
-					});
-		}
-
-		// Add exploratory
-		projection = fields(include(FIELD_USER,
-				FIELD_EXPLORATORY_NAME,
-				FIELD_EXPLORATORY_ID,
-				FIELD_PROJECT,
-				FIELD_COMPUTATIONAL_RESOURCES + "." + FIELD_COMPUTATIONAL_ID,
-				FIELD_COMPUTATIONAL_RESOURCES + "." + FIELD_COMPUTATIONAL_NAME,
-				FIELD_COMPUTATIONAL_RESOURCES + "." + FIELD_IMAGE,
-				FIELD_COMPUTATIONAL_RESOURCES + "." + FIELD_DATAENGINE_INSTANCE_COUNT),
-				excludeId());
-		docs = connection.getCollection(COLLECTION_USER_INSTANCES).find().projection(projection);
-		for (Document exp : docs) {
-			String username = exp.getString(FIELD_USER);
-			String exploratoryName = exp.getString(FIELD_EXPLORATORY_NAME);
-			String exploratoryId = exp.getString(FIELD_EXPLORATORY_ID);
-			String project = exp.getString(FIELD_PROJECT);
-			resourceList.append(exploratoryId, exploratoryName, DlabResourceType.EXPLORATORY, username,
-					exploratoryName, project);
-			appendExploratoryVolumes(username, exploratoryName, exploratoryId, project);
-
-			// Add computational
-			@SuppressWarnings("unchecked")
-			List<Document> compList = (List<Document>) exp.get(FIELD_COMPUTATIONAL_RESOURCES);
-			if (compList == null) {
-				continue;
-			}
-			for (Document comp : compList) {
-				String computationalId = comp.getString(FIELD_COMPUTATIONAL_ID);
-				String computationalName = comp.getString(FIELD_COMPUTATIONAL_NAME);
-				final DataEngineType dataEngineType = DataEngineType.fromDockerImageName(comp.getString(FIELD_IMAGE));
-				resourceList.append(computationalId, computationalName, DlabResourceType.COMPUTATIONAL, username,
-						exploratoryName, project);
-				if (DataEngineType.CLOUD_SERVICE == dataEngineType) {
-					appendDataengineServiceVolumes(username, exploratoryName, computationalId, computationalName,
-							project);
-				} else {
-					appendDataengineVolumes(username, exploratoryName, comp, computationalId, computationalName,
-							project);
-				}
-			}
-		}
-		LOGGER.debug("resourceList is {}", resourceList);
-	}
-
-	private void appendExploratoryVolumes(String username, String exploratoryName, String exploratoryId,
-										  String project) {
-		resourceList.append(exploratoryId + VOLUME_PRIMARY_SUFFIX, "Volume primary", DlabResourceType.VOLUME,
-				username, exploratoryName, project);
-		resourceList.append(exploratoryId + VOLUME_SECONDARY_SUFFIX, "Volume secondary", DlabResourceType.VOLUME,
-				username, exploratoryName, project);
-	}
-
-	private void appendDataengineServiceVolumes(String username, String exploratoryName, String computationalId,
-												String computationalName, String project) {
-		resourceList.append(computationalId + VOLUME_PRIMARY_SUFFIX, computationalName + " volume primary",
-				DlabResourceType.VOLUME, username, exploratoryName, project);
-		resourceList.append(computationalId + VOLUME_SECONDARY_SUFFIX, computationalName + " volume secondary",
-				DlabResourceType.VOLUME, username, exploratoryName, project);
-	}
-
-	private void appendDataengineVolumes(String username, String exploratoryName, Document comp, String
-			computationalId, String computationalName, String project) {
-		resourceList.append(computationalId + "-m-volume-primary", computationalName + " master volume primary",
-				DlabResourceType.VOLUME, username, exploratoryName, project);
-		resourceList.append(computationalId + "-m-volume-secondary", computationalName + " master volume secondary",
-				DlabResourceType.VOLUME, username, exploratoryName, project);
-		final Integer instanceCount = Integer.valueOf(comp.getString(FIELD_DATAENGINE_INSTANCE_COUNT));
-		for (int i = instanceCount - 1; i > 0; i--) {
-			final String slaveId = computationalId + "-s" + i;
-			final String slaveName = computationalName + "-s" + i;
-			resourceList.append(slaveId + VOLUME_PRIMARY_SUFFIX, slaveName + " volume primary", DlabResourceType
-					.VOLUME, username, exploratoryName, project);
-			resourceList.append(slaveId + VOLUME_SECONDARY_SUFFIX, slaveName + " volume secondary", DlabResourceType
-					.VOLUME, username, exploratoryName, project);
-		}
-	}
-
-
-	/**
 	 * Convert and return the report line of billing to Mongo document.
 	 *
 	 * @param row report line.
@@ -246,21 +115,7 @@
 					resourceId);
 		}
 		resourceId = resourceId.substring(serviceBaseNameId.length());
-
-		ResourceItem resource = resourceList.getById(resourceId);
 		Document d = new Document(ReportLine.FIELD_DLAB_ID, resourceId);
-		if (resource == null) {
-			d.put(FIELD_DLAB_RESOURCE_ID, null);
-			d.put(FIELD_DLAB_RESOURCE_TYPE, null);
-			d.put(ReportLine.FIELD_USER_ID, null);
-			d.put(FIELD_EXPLORATORY_NAME, null);
-		} else {
-			d.put(FIELD_DLAB_RESOURCE_ID, resource.getResourceId());
-			d.put(FIELD_DLAB_RESOURCE_TYPE, resource.getType().toString());
-			d.put(ReportLine.FIELD_USER_ID, resource.getUser());
-			d.put(FIELD_EXPLORATORY_NAME, resource.getExploratoryName());
-			d.put(FIELD_PROJECT, resource.getProject());
-		}
 		return d.append(ReportLine.FIELD_USAGE_DATE, row.getUsageDate())
 				.append(ReportLine.FIELD_PRODUCT, row.getProduct())
 				.append(ReportLine.FIELD_USAGE_TYPE, row.getUsageType())
@@ -278,7 +133,7 @@
 	 *
 	 * @param fieldNames the list of field names.
 	 */
-	private Document getGrouppingFields(String... fieldNames) {
+	private Document getGroupingFields(String... fieldNames) {
 		Document d = new Document();
 		for (String name : fieldNames) {
 			d.put(name, "$" + name);
@@ -305,7 +160,7 @@
 		List<? extends Bson> pipeline = Arrays.asList(
 				match(and(gte(ReportLine.FIELD_USAGE_DATE, month + "-01"),
 						lte(ReportLine.FIELD_USAGE_DATE, month + "-31"))),
-				group(getGrouppingFields(FIELD_DLAB_RESOURCE_ID,
+				group(getGroupingFields(FIELD_DLAB_RESOURCE_ID,
 						FIELD_DLAB_RESOURCE_TYPE,
 						FIELD_USER,
 						FIELD_EXPLORATORY_NAME,
@@ -331,128 +186,4 @@
 			collection.insertMany(totals);
 		}
 	}
-
-	/**
-	 * Comparator to sort billing exploratory details.
-	 */
-	class BillingComparator implements Comparator<Document> {
-		@Override
-		public int compare(Document d1, Document d2) {
-			int result = StringUtils.compare(d1.getString(FIELD_RESOURCE_NAME), d2.getString(FIELD_RESOURCE_NAME));
-			if (result == 0) {
-				result = StringUtils.compare(d1.getString(ReportLine.FIELD_PRODUCT), d2.getString(ReportLine
-						.FIELD_PRODUCT));
-				if (result == 0) {
-					return StringUtils.compare(d1.getString(ReportLine.FIELD_RESOURCE_TYPE), d2.getString(ReportLine
-							.FIELD_RESOURCE_TYPE));
-				}
-			}
-			return result;
-		}
-	}
-
-	/**
-	 * Update exploratory cost in Mongo DB.
-	 *
-	 * @param user            the name of user.
-	 * @param exploratoryName id of exploratory.
-	 */
-	private void updateExploratoryCost(String user, String exploratoryName) {
-		LOGGER.debug("Update explorartory {} cost for user {}", exploratoryName, user);
-		List<? extends Bson> pipeline = Arrays.asList(
-				match(and(eq(FIELD_USER, user),
-						eq(FIELD_EXPLORATORY_NAME, exploratoryName))),
-				group(getGrouppingFields(FIELD_DLAB_RESOURCE_ID,
-						ReportLine.FIELD_PRODUCT,
-						ReportLine.FIELD_RESOURCE_TYPE,
-						ReportLine.FIELD_CURRENCY_CODE),
-						sum(ReportLine.FIELD_COST, "$" + ReportLine.FIELD_COST),
-						min(FIELD_USAGE_DATE_START, "$" + ReportLine.FIELD_USAGE_DATE),
-						max(FIELD_USAGE_DATE_END, "$" + ReportLine.FIELD_USAGE_DATE)
-				),
-				sort(new Document(FIELD_ID + "." + FIELD_DLAB_RESOURCE_ID, 1).append(FIELD_ID + "." + ReportLine
-						.FIELD_PRODUCT, 1))
-		);
-		AggregateIterable<Document> docs = connection.getCollection(COLLECTION_BILLING)
-				.aggregate(pipeline);
-		LinkedList<Document> billing = new LinkedList<>();
-		ResourceItemList resources = getResourceList();
-		Double costTotal = null;
-		String currencyCode = null;
-		for (Document d : docs) {
-			Document id = (Document) d.get(FIELD_ID);
-			double cost = BillingCalculationUtils.round(d.getDouble(ReportLine.FIELD_COST), 2);
-			costTotal = (costTotal == null ? cost : costTotal + cost);
-			if (currencyCode == null) {
-				currencyCode = id.getString(ReportLine.FIELD_CURRENCY_CODE);
-			}
-
-			Document total = new Document()
-					.append(FIELD_RESOURCE_NAME, resources.getById(id.getString(FIELD_DLAB_RESOURCE_ID))
-							.getResourceName())
-					.append(ReportLine.FIELD_PRODUCT, id.getString(ReportLine.FIELD_PRODUCT))
-					.append(ReportLine.FIELD_RESOURCE_TYPE, id.getString(ReportLine.FIELD_RESOURCE_TYPE))
-					.append(ReportLine.FIELD_COST, BillingCalculationUtils.formatDouble(cost))
-					.append(ReportLine.FIELD_CURRENCY_CODE, id.getString(ReportLine.FIELD_CURRENCY_CODE))
-					.append(FIELD_USAGE_DATE_START, d.getString(FIELD_USAGE_DATE_START))
-					.append(FIELD_USAGE_DATE_END, d.getString(FIELD_USAGE_DATE_END));
-			billing.add(total);
-		}
-
-		LOGGER.debug("Total explorartory {} cost for user {} is {} {}, detail count is {}",
-				exploratoryName, user, costTotal, currencyCode, billing.size());
-		billing.sort(new BillingComparator());
-
-		MongoCollection<Document> cExploratory = connection.getCollection(COLLECTION_USER_INSTANCES);
-		Bson values = Updates.combine(
-				Updates.set(ReportLine.FIELD_COST, BillingCalculationUtils.formatDouble(costTotal)),
-				Updates.set(FIELD_CURRENCY_CODE, currencyCode),
-				Updates.set(COLLECTION_BILLING, (!billing.isEmpty() ? billing : null)));
-		cExploratory.updateOne(
-				and(and(eq(FIELD_USER, user),
-						eq(FIELD_EXPLORATORY_NAME, exploratoryName))),
-				values);
-	}
-
-	/**
-	 * Update EDGE cost in Mongo DB.
-	 *
-	 * @param user the name of user.
-	 */
-	private void updateEdgeCost(String user) {
-		List<? extends Bson> pipeline = Arrays.asList(
-				match(and(eq(FIELD_USER, user),
-						eq(FIELD_EXPLORATORY_NAME, null))),
-				group(getGrouppingFields(ReportLine.FIELD_CURRENCY_CODE),
-						sum(ReportLine.FIELD_COST, "$" + ReportLine.FIELD_COST))
-		);
-		AggregateIterable<Document> docs = connection.getCollection(COLLECTION_BILLING_TOTAL)
-				.aggregate(pipeline);
-
-		MongoCollection<Document> cEdge = connection.getCollection(COLLECTION_USER_EDGE);
-		for (Document d : docs) {
-			Document id = (Document) d.get(FIELD_ID);
-			Bson values = Updates.combine(
-					Updates.set(ReportLine.FIELD_COST, BillingCalculationUtils.round(d.getDouble(ReportLine
-							.FIELD_COST), 2)),
-					Updates.set(FIELD_CURRENCY_CODE, id.get(ReportLine.FIELD_CURRENCY_CODE)));
-			cEdge.updateOne(
-					eq(FIELD_ID, user),
-					values);
-		}
-	}
-
-	/**
-	 * Update the cost of exploratory environment for all users in Mongo DB.
-	 */
-	public void updateExploratoryCost() {
-		for (int i = 0; i < resourceList.size(); i++) {
-			ResourceItem item = resourceList.get(i);
-			if (item.getType() == DlabResourceType.EXPLORATORY) {
-				updateExploratoryCost(item.getUser(), item.getExploratoryName());
-			} else if (item.getType() == DlabResourceType.EDGE) {
-				updateEdgeCost(item.getUser());
-			}
-		}
-	}
 }
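
getGroupingFields (renamed from getGrouppingFields) simply maps each field name to its $-prefixed reference for use as a group key. An illustrative pipeline in the same shape as the monthly-total aggregation above, with literal "usage_date"/"cost" strings standing in for the ReportLine constants used in the DAO:

    import org.bson.Document;
    import org.bson.conversions.Bson;

    import java.util.Arrays;
    import java.util.List;

    import static com.mongodb.client.model.Accumulators.sum;
    import static com.mongodb.client.model.Aggregates.group;
    import static com.mongodb.client.model.Aggregates.match;
    import static com.mongodb.client.model.Filters.and;
    import static com.mongodb.client.model.Filters.gte;
    import static com.mongodb.client.model.Filters.lte;

    // Restrict billing rows to one calendar month, then total cost per group
    // key (a Document such as the one getGroupingFields builds).
    public final class MonthlyTotalPipeline {
        private MonthlyTotalPipeline() {
        }

        public static List<? extends Bson> forMonth(String month, Document groupKey) {
            return Arrays.asList(
                    match(and(gte("usage_date", month + "-01"),
                            lte("usage_date", month + "-31"))),
                    group(groupKey, sum("cost", "$cost")));
        }
    }
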
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/mongo/ResourceItem.java b/services/billing-aws/src/main/java/com/epam/dlab/mongo/ResourceItem.java
deleted file mode 100644
index d92b2d6..0000000
--- a/services/billing-aws/src/main/java/com/epam/dlab/mongo/ResourceItem.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.mongo;
-
-import com.epam.dlab.billing.DlabResourceType;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
-import org.apache.commons.lang3.StringUtils;
-
-/**
- * The resource of DLab environment.
- */
-public class ResourceItem implements Comparable<ResourceItem> {
-
-	private final String project;
-	/**
-	 * Resource ID.
-	 */
-	String resourceId;
-
-	/**
-	 * User friendly name of resource.
-	 */
-	String resourceName;
-
-	/**
-	 * Type of resource.
-	 */
-	DlabResourceType type;
-
-	/**
-	 * Name of user.
-	 */
-	String user;
-
-	/**
-	 * Name of exploratory.
-	 */
-	String exploratoryName;
-
-	/**
-	 * Instantiate resource of DLab environment.
-	 *
-	 * @param resourceId      resource id.
-	 * @param type            the type of resource.
-	 * @param user            the name of user.
-	 * @param exploratoryName the name of exploratory.
-	 */
-	public ResourceItem(String resourceId, String resourceName, DlabResourceType type,
-						String user, String exploratoryName, String project) {
-		this.resourceId = resourceId;
-		this.resourceName = resourceName;
-		this.type = type;
-		this.user = user;
-		this.exploratoryName = exploratoryName;
-		this.project = project;
-	}
-
-	@Override
-	public int compareTo(ResourceItem o) {
-		if (o == null) {
-			return -1;
-		}
-		int result = StringUtils.compare(resourceId, o.resourceId);
-		if (result == 0) {
-			result = StringUtils.compare(exploratoryName, o.exploratoryName);
-			if (result == 0) {
-				result = StringUtils.compare(type.name(), o.type.name());
-				if (result == 0) {
-					return StringUtils.compare(user, o.user);
-				}
-			}
-		}
-		return result;
-	}
-
-
-	/**
-	 * Returns the resource id.
-	 */
-	public String getResourceId() {
-		return resourceId;
-	}
-
-	/**
-	 * Return user friendly name of resource.
-	 */
-	public String getResourceName() {
-		return resourceName;
-	}
-
-	/**
-	 * Returns the type of resource.
-	 */
-	public DlabResourceType getType() {
-		return type;
-	}
-
-	/**
-	 * Returns the name of user.
-	 */
-	public String getUser() {
-		return user;
-	}
-
-	/**
-	 * Returns the name of exploratory.
-	 */
-	public String getExploratoryName() {
-		return exploratoryName;
-	}
-
-	public String getProject() {
-		return project;
-	}
-
-	/**
-	 * Returns a string representation of the object.
-	 *
-	 * @param self the object to generate the string for (typically this), used only for its class name.
-	 */
-	public ToStringHelper toStringHelper(Object self) {
-		return MoreObjects.toStringHelper(self)
-				.add("resourceId", resourceId)
-				.add("resourceName", resourceName)
-				.add("type", type)
-				.add("user", user)
-				.add("exploratoryName", exploratoryName);
-	}
-
-	@Override
-	public String toString() {
-		return toStringHelper(this).toString();
-	}
-}
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/mongo/ResourceItemList.java b/services/billing-aws/src/main/java/com/epam/dlab/mongo/ResourceItemList.java
deleted file mode 100644
index d5c0091..0000000
--- a/services/billing-aws/src/main/java/com/epam/dlab/mongo/ResourceItemList.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.mongo;
-
-import com.epam.dlab.billing.DlabResourceType;
-import com.google.common.base.MoreObjects;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Vector;
-
-/**
- * List of the DLab's resources.
- */
-public class ResourceItemList {
-	/**
-	 * List of the resources.
-	 */
-	private final Vector<ResourceItem> list;
-
-
-	/**
-	 * Constructs an empty list of resources.
-	 */
-	public ResourceItemList() {
-		list = new Vector<>();
-	}
-
-
-	/**
-	 * Appends the resource to the list and returns it.
-	 *
-	 * @param resourceId      the resource id.
-	 * @param resourceName    the user friendly name of resource.
-	 * @param type            the type of resource.
-	 * @param user            the name of user.
-	 * @param exploratoryName the name of exploratory.
-	 * @return Instance of the resource.
-	 */
-	public ResourceItem append(String resourceId, String resourceName, DlabResourceType type, String user,
-							   String exploratoryName, String project) {
-		ResourceItem item = new ResourceItem(resourceId, resourceName, type, user, exploratoryName, project);
-		synchronized (this) {
-			int index = Collections.binarySearch(list, item);
-			if (index < 0) {
-				index = -index;
-				if (index > list.size()) {
-					list.add(item);
-				} else {
-					list.add(index - 1, item);
-				}
-			} else {
-				item = list.get(index);
-			}
-		}
-		return item;
-	}
-
-	public ResourceItem append(String resourceId, String resourceName, DlabResourceType type) {
-		return append(resourceId, resourceName, type, null, null, null);
-	}
-
-	/**
-	 * Returns the number of the range in list.
-	 */
-	public int size() {
-		return list.size();
-	}
-
-	/**
-	 * Returns the resource.
-	 *
-	 * @param index index of the resource.
-	 */
-	public ResourceItem get(int index) {
-		return list.get(index);
-	}
-
-	/**
-	 * Comparator for search resource item by resource id.
-	 */
-	private final ResourceItem findItemById = new ResourceItem(null, null, null, null, null, null);
-	private final ComparatorByName compareByName = new ComparatorByName();
-
-	private class ComparatorByName implements Comparator<ResourceItem> {
-
-		@Override
-		public int compare(ResourceItem o1, ResourceItem o2) {
-			return StringUtils.compare(o1.resourceId, o2.resourceId);
-		}
-
-	}
-
-	/**
-	 * Find and return the resource by resource id.
-	 *
-	 * @param resourceId index of the resource.
-	 */
-	public ResourceItem getById(String resourceId) {
-		findItemById.resourceId = resourceId;
-		int index = Collections.binarySearch(list, findItemById, compareByName);
-
-		return (index < 0 ? null : list.get(index));
-	}
-
-	/**
-	 * Removes all of the elements from list.
-	 */
-	public void clear() {
-		list.clear();
-	}
-
-
-	@Override
-	public String toString() {
-		return MoreObjects.toStringHelper(this).add("items", list).toString();
-	}
-}
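
For reference, the removed ResourceItemList kept its backing Vector sorted by inserting at the Collections.binarySearch insertion point, which binarySearch encodes as (-(insertionPoint) - 1) for absent keys. The idiom in isolation:

    import java.util.Collections;
    import java.util.List;

    // The sorted-insert idiom the removed list relied on: binarySearch returns
    // (-(insertionPoint) - 1) when the key is absent, so -index - 1 recovers
    // the slot that keeps the list ordered.
    public final class SortedInsert {
        private SortedInsert() {
        }

        public static void insert(List<String> sorted, String value) {
            int index = Collections.binarySearch(sorted, value);
            if (index < 0) {
                sorted.add(-index - 1, value);
            }
        }
    }
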
diff --git a/services/billing-aws/src/main/resources/application.yml b/services/billing-aws/src/main/resources/application.yml
new file mode 100644
index 0000000..8bd3a4f
--- /dev/null
+++ b/services/billing-aws/src/main/resources/application.yml
@@ -0,0 +1,55 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+spring:
+  main:
+    allow-bean-definition-overriding: true
+  data:
+    mongodb:
+      username: admin
+      password: MONGO_PASSWORD
+      database: dlabdb
+      port: 27017
+      host: MONGO_HOST
+
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
+
+server.ssl.key-store-type: JKS
+server.ssl.key-store: /home/OS_USER/keys/ssn.keystore.jks
+server.ssl.key-store-password: KEY_STORE_PASSWORD
+server.ssl.key-alias: ssn
+
+logging:
+  file: /var/opt/dlab/log/ssn/billing.log
+  level:
+    com:
+      epam: trace
+
+keycloak:
+  bearer-only: true
+  realm: dlab
+  resource: KEYCLOAK_CLIENT_ID
+  credentials.secret: KEYCLOAK_CLIENT_SECRET
+  ssl-required: none
+  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
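
This file is standard Spring Boot configuration; a hypothetical launcher class showing how it gets picked up (the module's actual main class is not part of this diff):

    package com.epam.dlab;

    import org.springframework.boot.SpringApplication;
    import org.springframework.boot.autoconfigure.SpringBootApplication;

    // Hypothetical launcher: on startup Spring Boot binds spring.data.mongodb.*,
    // server.* and keycloak.* from the application.yml above.
    @SpringBootApplication
    public class BillingAwsApplication {
        public static void main(String[] args) {
            SpringApplication.run(BillingAwsApplication.class, args);
        }
    }
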
diff --git a/services/billing-aws/src/test/java/com/epam/dlab/mongo/ResourceItemListTest.java b/services/billing-aws/src/test/java/com/epam/dlab/mongo/ResourceItemListTest.java
deleted file mode 100644
index 1c90bb8..0000000
--- a/services/billing-aws/src/test/java/com/epam/dlab/mongo/ResourceItemListTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.mongo;
-
-import static junit.framework.TestCase.assertEquals;
-
-import com.epam.dlab.billing.DlabResourceType;
-import org.junit.Test;
-
-public class ResourceItemListTest {
-
-	@Test
-	public void append() {
-		ResourceItemList list = new ResourceItemList();
-		list.append("tag-user-nb-exp", "exp", DlabResourceType.EXPLORATORY, "user", "exp", "");
-		list.append("tag-user-emr-exp-comp", "comp", DlabResourceType.COMPUTATIONAL, "user", "exp", "");
-		
-		assertEquals(2, list.size());
-		
-		ResourceItem comp = list.get(0);
-		assertEquals("tag-user-emr-exp-comp", comp.getResourceId());
-		assertEquals("comp", comp.getResourceName());
-		assertEquals(DlabResourceType.COMPUTATIONAL, comp.getType());
-		assertEquals("user", comp.getUser());
-		assertEquals("exp", comp.getExploratoryName());
-		
-		ResourceItem exp = list.get(1);
-		assertEquals("tag-user-nb-exp", exp.getResourceId());
-		assertEquals("exp", exp.getResourceName());
-		assertEquals(DlabResourceType.EXPLORATORY, exp.getType());
-		assertEquals("user", exp.getUser());
-		assertEquals("exp", exp.getExploratoryName());
-		
-		list.clear();
-		assertEquals(0, list.size());
-	}
-}
diff --git a/services/self-service/Dockerfile_gcp b/services/billing-azure/Dockerfile
similarity index 77%
rename from services/self-service/Dockerfile_gcp
rename to services/billing-azure/Dockerfile
index 16da950..dc19faf 100644
--- a/services/self-service/Dockerfile_gcp
+++ b/services/billing-azure/Dockerfile
@@ -21,17 +21,8 @@
 
 FROM openjdk:8-alpine
 
-
 USER root
 
-RUN mkdir -p /root/keys/
-COPY endpoint1.crt /root/keys/
-COPY endpoint2.crt /root/keys/
-COPY ssn.crt /root/keys/
-COPY ssn.keystore.jks /root/keys/
+COPY billing-azure-2.2.jar /root/
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_gcp.sh /
-RUN chmod 755 /entrypoint_gcp.sh
-
-ENTRYPOINT ["/entrypoint_gcp.sh"]
+CMD java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 /root/billing-azure-2.2.jar --conf /root/billing.yml
\ No newline at end of file
diff --git a/services/billing-azure/billing.yml b/services/billing-azure/billing.yml
index da47bd1..5361d90 100644
--- a/services/billing-azure/billing.yml
+++ b/services/billing-azure/billing.yml
@@ -19,59 +19,74 @@
 #
 # ******************************************************************************
 
-# Authentication info
+spring:
+  main:
+    allow-bean-definition-overriding: true
+  data:
+    mongodb:
+      username: admin
+      password: MONGO_PASSWORD
+      database: dlabdb
+      port: MONGO_PORT
+      host: MONGO_HOST
 
-# Explicit azure authentication parameters
-clientId: <CLIENT_ID>
-clientSecret: <CLIENT_SECRET>
-tenantId: <TENANT_ID>
-subscriptionId: <SUBSCRIPTION_ID>
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
 
-# Contains authentication info (clientId, clientSecret, tenantId, subscriptionId) received after Azure CLI authentication
-# Overrides explicit azure authentication parameters above
-authenticationFile: <AUTHENTICATION_FILE>
+server.ssl.key-store-type: JKS
+server.ssl.key-store: /home/OS_USER/keys/ssn.keystore.jks
+server.ssl.key-store-password: KEY_STORE_PASSWORD
+server.ssl.key-alias: ssn
 
-# Billing configuration for RateCard API. For more details please see https://msdn.microsoft.com/en-us/library/mt219004.aspx
-offerNumber: <OFFER_NUMBER>
-currency: <CURRENCY>
-locale: <LOCALE>
-regionInfo: <REGION_INFO>
-
-# Azure provides with aggregated data by date. Scheduler tries to retrieve data every <period> with <initialDelay> from
-# application startup in minutes.
-# Scheduler retrieves data only for the past period of time from midnight of start day to midnight of end date.
-# Scheduler does not retrieve data for the current date
-initialDelay: 10
-period: 60
-
-billingEnabled: false
-
-aggregationOutputMongoDataSource:
-    host: localhost
-    port: 27017
-    username: admin
-    password: <MONGODB_PASSWORD>
-    database: dlabdb
-
-ssnStorageAccountTagName: <AZURE_SSN_STORAGE_ACCOUNT_TAG>
-sharedStorageAccountTagName: <AZURE_SHARED_STORAGE_ACCOUNT_TAG>
-datalakeTagName: <AZURE_DATALAKE_TAG>
-
-# Logging configuration.
 logging:
-  # Default logging level
-  level: INFO
-  # Logging levels for appenders.
-  loggers:
-    com.epam: DEBUG
-    org.apache.http: WARN
-    org.mongodb.driver: WARN
-    org.hibernate: WARN
-  #Logging appenders
-  appenders:
-    #- type: console
-    - type: file
-      currentLogFilename: /var/opt/dlab/log/ssn/billing.log
-      archive: true
-      archivedLogFilenamePattern: /var/opt/dlab/log/ssn/billing-%d{yyyy-MM-dd}.log.gz
-      archivedFileCount: 10
\ No newline at end of file
+  file: /var/opt/dlab/log/ssn/billing.log
+  level:
+    com:
+      epam: trace
+
+keycloak:
+  bearer-only: true
+  realm: dlab
+  resource: KEYCLOAK_CLIENT_ID
+  credentials.secret: KEYCLOAK_CLIENT_SECRET
+  ssl-required: none
+  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
+
+dlab:
+  sbn: SERVICE_BASE_NAME
+  billingEnabled: true
+
+  # Authentication info
+
+  # Explicit azure authentication parameters
+  clientId: CLIENT_ID
+  clientSecret: CLIENT_SECRET
+  tenantId: TENANT_ID
+  subscriptionId: SUBSCRIPTION_ID
+
+  # Contains authentication info (clientId, clientSecret, tenantId, subscriptionId) received after Azure CLI authentication
+  # Overrides explicit azure authentication parameters above
+  authenticationFile: AUTHENTICATION_FILE
+  # Billing configuration for RateCard API. For more details please see https://msdn.microsoft.com/en-us/library/mt219004.aspx
+  offerNumber: OFFER_NUMBER
+  currency: CURRENCY
+  locale: LOCALE
+  regionInfo: REGION_INFO
+
+  # Azure provides aggregated data by date. The scheduler retrieves data every <period> minutes,
+  # starting <initialDelay> minutes after application startup.
+  # It retrieves only past data, from midnight of the start day to midnight of the end date,
+  # and never retrieves data for the current date
+  initialDelay: 10
+  period: 60
+  aggregationOutputMongoDataSource:
+    host: MONGO_HOST
+    port: MONGO_PORT
+    username: admin
+    password: MONGO_PASSWORD
+    database: dlabdb
+  ssnStorageAccountTagName: AZURE_SSN_STORAGE_ACCOUNT_TAG
+  sharedStorageAccountTagName: AZURE_SHARED_STORAGE_ACCOUNT_TAG
+  datalakeTagName: AZURE_DATALAKE_TAG
\ No newline at end of file
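
Properties under the new dlab: prefix are natural candidates for type-safe binding; a hypothetical @ConfigurationProperties holder for two of them (field names assumed to match relaxed binding; the commit's real binding class is not shown here):

    package com.epam.dlab.billing.azure;

    import org.springframework.boot.context.properties.ConfigurationProperties;
    import org.springframework.context.annotation.Configuration;

    // Hypothetical holder: Spring binds dlab.sbn and dlab.billingEnabled from
    // the billing.yml above onto these fields at startup.
    @Configuration
    @ConfigurationProperties(prefix = "dlab")
    public class DlabBillingProperties {
        private String sbn;
        private boolean billingEnabled;

        public String getSbn() {
            return sbn;
        }

        public void setSbn(String sbn) {
            this.sbn = sbn;
        }

        public boolean isBillingEnabled() {
            return billingEnabled;
        }

        public void setBillingEnabled(boolean billingEnabled) {
            this.billingEnabled = billingEnabled;
        }
    }
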
diff --git a/services/billing-azure/pom.xml b/services/billing-azure/pom.xml
index 41edc3c..75c8e35 100644
--- a/services/billing-azure/pom.xml
+++ b/services/billing-azure/pom.xml
@@ -28,16 +28,65 @@
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
-
     <artifactId>billing-azure</artifactId>
 
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-dependencies</artifactId>
+                <version>2.1.3.RELEASE</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.keycloak.bom</groupId>
+                <artifactId>keycloak-adapter-bom</artifactId>
+                <version>4.8.3.Final</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
     <dependencies>
         <dependency>
-            <groupId>com.microsoft.azure</groupId>
-            <artifactId>azure-client-authentication</artifactId>
-            <version>1.2.1</version>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-configuration-processor</artifactId>
+            <optional>true</optional>
         </dependency>
-
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-data-mongodb</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-web</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-security</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.keycloak</groupId>
+            <artifactId>keycloak-spring-boot-starter</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.keycloak</groupId>
+            <artifactId>keycloak-spring-security-adapter</artifactId>
+            <version>4.8.3.Final</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <version>${org.mockito.version}</version>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>com.epam.dlab</groupId>
             <artifactId>dlab-model</artifactId>
@@ -45,6 +94,12 @@
         </dependency>
 
         <dependency>
+            <groupId>com.microsoft.azure</groupId>
+            <artifactId>azure-client-authentication</artifactId>
+            <version>1.2.1</version>
+        </dependency>
+
+        <dependency>
             <groupId>org.mongodb</groupId>
             <artifactId>mongo-java-driver</artifactId>
             <version>${org.mongodb.version}</version>
@@ -82,51 +137,13 @@
     <build>
         <plugins>
             <plugin>
-                <artifactId>maven-shade-plugin</artifactId>
-                <version>${maven-shade-plugin.version}</version>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-maven-plugin</artifactId>
                 <executions>
                     <execution>
-                        <phase>package</phase>
                         <goals>
-                            <goal>shade</goal>
+                            <goal>repackage</goal>
                         </goals>
-                        <configuration>
-                            <createDependencyReducedPom>false</createDependencyReducedPom>
-                            <minimizeJar>true</minimizeJar>
-                            <filters>
-                                <filter>
-                                    <artifact>commons-logging:commons-logging</artifact>
-                                    <includes>**</includes>
-                                </filter>
-                                <filter>
-                                    <artifact>*:*</artifact>
-                                    <excludes>
-                                        <exclude>META-INF/*.SF</exclude>
-                                        <exclude>META-INF/*.DSA</exclude>
-                                        <exclude>META-INF/*.RSA</exclude>
-                                    </excludes>
-                                </filter>
-                            </filters>
-                            <transformers>
-                                <transformer
-                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
-                                <transformer
-                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
-                                    <mainClass>com.epam.dlab.billing.azure.BillingSchedulerAzure</mainClass>
-                                    <manifestEntries>
-                                        <Created-By>&lt;EPAM&gt; Systems</Created-By>
-                                        <Name>com/epam/dlab</Name>
-                                        <Implementation-Title>DLab Azure Billing Module</Implementation-Title>
-                                        <Implementation-Version>${dlab.version}</Implementation-Version>
-                                        <Implementation-Vendor>&lt;EPAM&gt; Systems</Implementation-Vendor>
-                                        <Build-Time>${maven.build.timestamp}</Build-Time>
-                                        <Build-OS>${os.name}</Build-OS>
-                                        <GIT-Branch>${scmBranch}</GIT-Branch>
-                                        <GIT-Commit>${buildNumber}</GIT-Commit>
-                                    </manifestEntries>
-                                </transformer>
-                            </transformers>
-                        </configuration>
                     </execution>
                 </executions>
             </plugin>
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureBillableResourcesService.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureBillableResourcesService.java
deleted file mode 100644
index c4ee5b4..0000000
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureBillableResourcesService.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.azure;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.billing.DlabResourceType;
-import com.epam.dlab.billing.azure.model.AzureDlabBillableResource;
-import com.epam.dlab.dto.UserInstanceDTO;
-import com.epam.dlab.dto.azure.edge.EdgeInfoAzure;
-import com.epam.dlab.dto.computational.UserComputationalResource;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.collect.Sets;
-import com.mongodb.client.FindIterable;
-import com.mongodb.client.model.Filters;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-import org.bson.Document;
-
-import java.io.IOException;
-import java.util.*;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import java.util.stream.StreamSupport;
-
-import static com.mongodb.client.model.Projections.exclude;
-import static com.mongodb.client.model.Projections.fields;
-
-/**
- * Helps to retrieve billable resources that are created in scope of DLab usage. Uses MongoDB as data source
- * for created resources
- */
-@Slf4j
-public class AzureBillableResourcesService {
-	private static final String SHARED_RESOURCE = "Shared resource";
-	private static final String[] USER_INSTANCES_EXCLUDED_FIELDS = {"scheduler_data", "last_activity",
-			"computational_resources.scheduler_data", "computational_resources.last_activity"};
-	private final ObjectMapper objectMapper = new ObjectMapper();
-
-	private MongoDbBillingClient mongoDbBillingClient;
-	private String serviceBaseName;
-	private String sharedStorageAccountTagName;
-	private String ssnStorageAccountTagName;
-	private String azureDataLakeTagName;
-
-	/**
-	 * Constructs the service class
-	 *
-	 * @param mongoDbBillingClient        mongodb client to retrieve all billable resources
-	 * @param sharedStorageAccountTagName shared storage account tag name
-	 * @param ssnStorageAccountTagName    ssn storage account tag name
-	 * @param azureDataLakeTagName        azure DataLake tag name
-	 */
-	public AzureBillableResourcesService(MongoDbBillingClient mongoDbBillingClient, String sharedStorageAccountTagName,
-										 String ssnStorageAccountTagName, String azureDataLakeTagName) {
-		this.mongoDbBillingClient = mongoDbBillingClient;
-
-		this.serviceBaseName = getConfigurationSettingValue(MongoKeyWords.SERVICE_BASE_NAME_KEY)
-				.replace('_', '-').toLowerCase();
-
-		this.sharedStorageAccountTagName = sharedStorageAccountTagName;
-		this.ssnStorageAccountTagName = ssnStorageAccountTagName;
-		this.azureDataLakeTagName = azureDataLakeTagName;
-	}
-
-
-	/**
-	 * Collects billable resources
-	 *
-	 * @return set of all billable resources that were created in scope by DLab from its installation to current time
-	 */
-	public Set<AzureDlabBillableResource> getBillableResources() {
-
-		Set<AzureDlabBillableResource> billableResources = new HashSet<>();
-
-		billableResources.addAll(getSsn());
-		billableResources.addAll(getDataLake());
-		billableResources.addAll(getEdgeAndStorageAccount());
-		billableResources.addAll(getNotebooksAndClusters());
-
-		List<AzureDlabBillableResource> list = new ArrayList<>(billableResources);
-		list.sort(Comparator.comparing(AzureDlabBillableResource::getId));
-
-		try {
-			log.debug("Billable resources is \n {}", objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString
-					(list));
-		} catch (JsonProcessingException e) {
-			log.debug("Error during pretty printing. Show simple list", e);
-			log.debug("Billable resources is {}", list);
-		}
-
-		return billableResources;
-	}
-
-	private String getConfigurationSettingValue(String key) {
-
-		Document document = mongoDbBillingClient.getDatabase().getCollection(MongoKeyWords.SETTINGS_COLLECTION)
-				.find(Filters.eq(MongoKeyWords.MONGO_ID, key)).first();
-
-		if (document != null) {
-			String value = document.getString("value");
-			if (StringUtils.isEmpty(value)) {
-				throw new IllegalStateException("Configuration " + key + " does not have value in settings");
-			}
-			log.info("Key {} has value {}", key, value);
-			return value;
-		} else {
-			throw new IllegalStateException("Configuration " + key + " is not present in settings");
-		}
-
-	}
-
-	private String getConfigurationSettingValueOrEmpty(String key) {
-		try {
-			return getConfigurationSettingValue(key);
-		} catch (IllegalStateException e) {
-			log.warn("key {} is not found", key, e);
-			return null;
-		}
-	}
-
-	private Set<AzureDlabBillableResource> getSsn() {
-
-		return Sets.newHashSet(
-				AzureDlabBillableResource.builder().id(serviceBaseName + "-ssn").type(DlabResourceType.SSN).build(),
-				AzureDlabBillableResource.builder().id(ssnStorageAccountTagName).type(DlabResourceType
-						.SSN_STORAGE_ACCOUNT).build(),
-				AzureDlabBillableResource.builder().id(sharedStorageAccountTagName).type(DlabResourceType
-						.COLLABORATION_STORAGE_ACCOUNT).build(),
-				AzureDlabBillableResource.builder().id(serviceBaseName + "-ssn-volume-primary")
-						.type(DlabResourceType.VOLUME).build()
-		);
-	}
-
-	private Set<AzureDlabBillableResource> getDataLake() {
-
-		if (azureDataLakeTagName != null) {
-			return Sets.newHashSet(AzureDlabBillableResource.builder().id(azureDataLakeTagName)
-					.type(DlabResourceType.DATA_LAKE_STORE).build());
-		}
-
-		return Sets.newHashSet();
-	}
-
-	private Set<AzureDlabBillableResource> getEdgeAndStorageAccount() {
-		Map<String, List<Document>> projectEndpoints = StreamSupport.stream(mongoDbBillingClient.getDatabase()
-				.getCollection("Projects").find().spliterator(), false)
-				.collect(Collectors.toMap(key -> key.getString("name").toLowerCase(),
-						value -> (List<Document>) value.get("endpoints")));
-
-		return projectEndpoints.entrySet()
-				.stream()
-				.flatMap(projectEndpoint -> getEdgeAndStoragePerProject(projectEndpoint.getKey(), projectEndpoint.getValue()))
-				.collect(Collectors.toSet());
-	}
-
-	private Stream<AzureDlabBillableResource> getEdgeAndStoragePerProject(String projectName, List<Document> endpoints) {
-		return endpoints
-				.stream()
-				.flatMap(endpoint -> {
-					try {
-						return getEdgeAndStorageAccount(projectName, objectMapper.readValue(
-								objectMapper.writeValueAsString(endpoint.get("edgeInfo")),
-								new com.fasterxml.jackson.core.type.TypeReference<EdgeInfoAzure>() {
-								})).stream();
-					} catch (IOException e) {
-						log.error("Error during preparation of billable resources", e);
-					}
-					return Stream.empty();
-				});
-	}
-
-	private Set<AzureDlabBillableResource> getEdgeAndStorageAccount(String projectName, EdgeInfoAzure edgeInfoAzure) {
-		Set<AzureDlabBillableResource> billableResources = new HashSet<>();
-
-		if (StringUtils.isNotEmpty(edgeInfoAzure.getUserContainerName())) {
-			billableResources.add(AzureDlabBillableResource.builder()
-					.id(edgeInfoAzure.getUserStorageAccountTagName())
-					.type(DlabResourceType.EDGE_STORAGE_ACCOUNT)
-					.user(SHARED_RESOURCE)
-					.project(projectName)
-					.build());
-		}
-
-		if (StringUtils.isNotEmpty(edgeInfoAzure.getInstanceId())) {
-			billableResources.add(AzureDlabBillableResource.builder()
-					.id(edgeInfoAzure.getInstanceId())
-					.type(DlabResourceType.EDGE)
-					.user(SHARED_RESOURCE)
-					.project(projectName)
-					.build());
-
-			billableResources.add(AzureDlabBillableResource.builder()
-					.id(edgeInfoAzure.getInstanceId() + "-volume-primary")
-					.type(DlabResourceType.VOLUME)
-					.user(SHARED_RESOURCE)
-					.project(projectName)
-					.build());
-		}
-
-		return billableResources;
-	}
-
-	private Set<AzureDlabBillableResource> getNotebooksAndClusters() {
-
-		Set<AzureDlabBillableResource> billableResources = new HashSet<>();
-
-		try {
-			final FindIterable<Document> userInstanceDocuments = mongoDbBillingClient.getDatabase()
-					.getCollection(MongoKeyWords.NOTEBOOK_COLLECTION)
-					.find()
-					.projection(fields(exclude(USER_INSTANCES_EXCLUDED_FIELDS)));
-			List<UserInstanceDTO> userInstanceDTOS = objectMapper.readValue(
-					objectMapper.writeValueAsString(userInstanceDocuments),
-					new com.fasterxml.jackson.core.type.TypeReference<List<UserInstanceDTO>>() {
-					});
-
-			if (userInstanceDTOS != null && !userInstanceDTOS.isEmpty()) {
-				userInstanceDTOS.forEach(e -> billableResources.addAll(getNotebookAndClusters(e)));
-			}
-
-		} catch (IOException e) {
-			log.error("Error during preparation of billable resources", e);
-		}
-
-		return billableResources;
-	}
-
-	private Set<AzureDlabBillableResource> getNotebookAndClusters(UserInstanceDTO userInstanceDTO) {
-		Set<AzureDlabBillableResource> notebookResources = new HashSet<>();
-
-		if (StringUtils.isNotEmpty(userInstanceDTO.getExploratoryId())) {
-			notebookResources.add(AzureDlabBillableResource.builder()
-					.id(userInstanceDTO.getExploratoryId())
-					.type(DlabResourceType.EXPLORATORY)
-					.user(userInstanceDTO.getUser())
-					.project(userInstanceDTO.getProject())
-					.notebookId(userInstanceDTO.getExploratoryId())
-					.resourceName(userInstanceDTO.getExploratoryName())
-					.build());
-			notebookResources.addAll(getVolumes(userInstanceDTO, userInstanceDTO.getExploratoryId(), "Volume primary",
-					"Volume secondary"));
-
-			if (userInstanceDTO.getResources() != null && !userInstanceDTO.getResources().isEmpty()) {
-				for (UserComputationalResource userComputationalResource : userInstanceDTO.getResources()) {
-					if (StringUtils.isNotEmpty(userComputationalResource.getComputationalId())) {
-
-						notebookResources.add(AzureDlabBillableResource.builder()
-								.id(userComputationalResource.getComputationalId())
-								.type(DlabResourceType.COMPUTATIONAL)
-								.user(userInstanceDTO.getUser())
-								.project(userInstanceDTO.getProject())
-								.notebookId(userInstanceDTO.getExploratoryId())
-								.resourceName(userComputationalResource.getComputationalName())
-								.build());
-						final List<AzureDlabBillableResource> volumes = getVolumes(userInstanceDTO,
-								userComputationalResource.getComputationalId(),
-								userComputationalResource.getComputationalName() + " volume primary",
-								userComputationalResource.getComputationalName() + " volume secondary");
-						notebookResources.addAll(volumes);
-
-					} else {
-						log.error("Computational with empty id {} is found in notebook {}. Skip it.",
-								userComputationalResource, userInstanceDTO);
-					}
-				}
-			}
-
-		} else {
-			log.error("Notebook {} with empty id id found. Skip it.", userInstanceDTO);
-		}
-
-		return notebookResources;
-	}
-
-	private List<AzureDlabBillableResource> getVolumes(UserInstanceDTO userInstanceDTO, String exploratoryId, String
-			primaryVolumeName, String secondaryVolumeName) {
-
-		return Arrays.asList(
-				AzureDlabBillableResource.builder()
-						.id(exploratoryId + "-volume-primary")
-						.type(DlabResourceType.VOLUME)
-						.user(userInstanceDTO.getUser())
-						.project(userInstanceDTO.getProject())
-						.notebookId(userInstanceDTO.getExploratoryId())
-						.resourceName(primaryVolumeName)
-						.build(),
-				AzureDlabBillableResource.builder()
-						.id(exploratoryId + "-volume-secondary")
-						.type(DlabResourceType.VOLUME)
-						.user(userInstanceDTO.getUser())
-						.project(userInstanceDTO.getProject())
-						.notebookId(userInstanceDTO.getExploratoryId())
-						.resourceName(secondaryVolumeName)
-						.build()
-		);
-	}
-}
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureBillingDetailsService.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureBillingDetailsService.java
deleted file mode 100644
index cd4f713..0000000
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureBillingDetailsService.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.azure;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.billing.BillingCalculationUtils;
-import com.epam.dlab.billing.DlabResourceType;
-import com.google.common.collect.Lists;
-import com.mongodb.client.AggregateIterable;
-import com.mongodb.client.FindIterable;
-import com.mongodb.client.model.*;
-import com.mongodb.client.result.UpdateResult;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Consumer;
-
-@Slf4j
-public class AzureBillingDetailsService {
-	private MongoDbBillingClient mongoDbBillingClient;
-	private String currencyCode;
-
-	public AzureBillingDetailsService(MongoDbBillingClient mongoDbBillingClient, String currencyCode) {
-		this.mongoDbBillingClient = mongoDbBillingClient;
-		this.currencyCode = currencyCode;
-	}
-
-	public void updateBillingDetails() {
-		final List<String> users = new ArrayList<>();
-		FindIterable<Document> iterable = mongoDbBillingClient.getDatabase()
-				.getCollection(MongoKeyWords.EDGE_COLLECTION)
-				.find().projection(Projections.include(MongoKeyWords.MONGO_ID));
-
-		for (Document document : iterable) {
-			String user = document.getString(MongoKeyWords.MONGO_ID);
-			if (StringUtils.isNotEmpty(user)) {
-				users.add(user);
-			} else {
-				log.warn("Empty user is found");
-			}
-		}
-
-		if (!users.isEmpty()) {
-			users.forEach(this::updateBillingDetails);
-		} else {
-			log.warn("No users found");
-		}
-	}
-
-	public void updateBillingDetails(String user) {
-		log.debug("Updating billing details for user {}", user);
-
-		try {
-			AggregateIterable<Document> aggregateIterable = mongoDbBillingClient.getDatabase()
-					.getCollection(MongoKeyWords.BILLING_DETAILS)
-					.aggregate(Lists.newArrayList(
-							Aggregates.match(
-									Filters.and(
-											Filters.eq(MongoKeyWords.DLAB_USER, user),
-											Filters.in(MongoKeyWords.RESOURCE_TYPE,
-													DlabResourceType.EXPLORATORY.toString(),
-													DlabResourceType.COMPUTATIONAL.toString(),
-													DlabResourceType.VOLUME.toString())
-									)
-							),
-
-							Aggregates.group(getGroupingFields(
-									MongoKeyWords.DLAB_ID,
-									MongoKeyWords.DLAB_USER,
-									MongoKeyWords.EXPLORATORY_ID,
-									MongoKeyWords.RESOURCE_TYPE,
-									MongoKeyWords.RESOURCE_NAME,
-									MongoKeyWords.COMPUTATIONAL_ID,
-									MongoKeyWords.METER_CATEGORY),
-									Accumulators.sum(MongoKeyWords.COST, MongoKeyWords.prepend$(MongoKeyWords.COST)),
-									Accumulators.min(MongoKeyWords.USAGE_FROM, MongoKeyWords.prepend$(MongoKeyWords
-											.USAGE_DAY)),
-									Accumulators.max(MongoKeyWords.USAGE_TO, MongoKeyWords.prepend$(MongoKeyWords
-											.USAGE_DAY))
-							),
-
-							Aggregates.sort(Sorts.ascending(
-									MongoKeyWords.prependId(MongoKeyWords.RESOURCE_NAME),
-									MongoKeyWords.prependId(MongoKeyWords.METER_CATEGORY)))
-							)
-					);
-
-			updateBillingDetails(user, mapToDetails(aggregateIterable));
-		} catch (RuntimeException e) {
-			log.error("Updating billing details for user {} is failed", user, e);
-		}
-	}
-
-	private List<Document> mapToDetails(AggregateIterable<Document> aggregateIterable) {
-		List<Document> billingDetails = new ArrayList<>();
-		for (Document document : aggregateIterable) {
-			Document oldRef = (Document) document.get(MongoKeyWords.MONGO_ID);
-			Document newDocument = new Document();
-
-			newDocument.append(MongoKeyWords.USAGE_FROM, document.getString(MongoKeyWords.USAGE_FROM));
-			newDocument.append(MongoKeyWords.USAGE_TO, document.getString(MongoKeyWords.USAGE_TO));
-			newDocument.append(MongoKeyWords.COST, document.getDouble(MongoKeyWords.COST));
-
-			newDocument.append(MongoKeyWords.METER_CATEGORY, oldRef.getString(MongoKeyWords.METER_CATEGORY));
-			newDocument.append(MongoKeyWords.RESOURCE_NAME, oldRef.getString(MongoKeyWords.RESOURCE_NAME));
-			newDocument.append(MongoKeyWords.EXPLORATORY_ID, oldRef.getString(MongoKeyWords.EXPLORATORY_ID));
-			newDocument.append(MongoKeyWords.RESOURCE_TYPE, oldRef.getString(MongoKeyWords.RESOURCE_TYPE));
-			newDocument.append(MongoKeyWords.CURRENCY_CODE, currencyCode);
-
-			billingDetails.add(newDocument);
-		}
-
-		return billingDetails;
-	}
-
-
-	private void updateBillingDetails(String user, List<Document> billingDetails) {
-		if (!billingDetails.isEmpty()) {
-			Map<String, List<Document>> info = new HashMap<>();
-
-			Consumer<Document> aggregator = e -> {
-
-				String notebookId = e.getString(MongoKeyWords.EXPLORATORY_ID);
-				List<Document> documents = info.get(notebookId);
-				if (documents == null) {
-					documents = new ArrayList<>();
-				}
-
-				documents.add(e);
-				info.put(notebookId, documents);
-			};
-
-			billingDetails.stream()
-					.filter(e -> DlabResourceType.EXPLORATORY.toString().equals(e.getString(MongoKeyWords
-							.RESOURCE_TYPE)))
-					.forEach(aggregator);
-
-			billingDetails.stream()
-					.filter(e -> DlabResourceType.COMPUTATIONAL.toString().equals(e.getString(MongoKeyWords
-							.RESOURCE_TYPE))).forEach(aggregator);
-
-			billingDetails.stream()
-					.filter(e -> DlabResourceType.VOLUME.toString().equals(e.getString(MongoKeyWords.RESOURCE_TYPE)))
-					.forEach(aggregator);
-
-
-			for (Map.Entry<String, List<Document>> entry : info.entrySet()) {
-				double sum = entry.getValue().stream().mapToDouble(e -> e.getDouble(MongoKeyWords.COST)).sum();
-
-				entry.getValue().forEach(e -> e.put(MongoKeyWords.COST_STRING,
-						BillingCalculationUtils.formatDouble(e.getDouble(MongoKeyWords.COST))));
-
-				log.debug("Update billing for notebook {}, cost is {} {}", entry.getKey(), sum, currencyCode);
-
-				Bson updates = Updates.combine(
-						Updates.set(MongoKeyWords.COST_STRING, BillingCalculationUtils.formatDouble(sum)),
-						Updates.set(MongoKeyWords.COST, sum),
-						Updates.set(MongoKeyWords.CURRENCY_CODE, currencyCode),
-						Updates.set(MongoKeyWords.BILLING_DETAILS, entry.getValue()));
-
-				UpdateResult updateResult = mongoDbBillingClient.getDatabase()
-						.getCollection(MongoKeyWords.NOTEBOOK_COLLECTION)
-						.updateOne(
-								Filters.and(
-										Filters.eq(MongoKeyWords.DLAB_USER, user),
-										Filters.eq(MongoKeyWords.EXPLORATORY_ID_OLD, entry.getKey())
-								),
-								updates
-						);
-
-				log.debug("Update result for {}/{} is {}", user, entry.getKey(), updateResult);
-			}
-		} else {
-			log.warn("No billing details found for notebooks for user {}", user);
-		}
-	}
-
-
-	private Document getGroupingFields(String... fieldNames) {
-		Document d = new Document();
-		for (String name : fieldNames) {
-			d.put(name, "$" + name);
-		}
-		return d;
-	}
-}
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureInvoiceCalculationService.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureInvoiceCalculationService.java
index 0fd6098..b3eec4f 100644
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureInvoiceCalculationService.java
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/AzureInvoiceCalculationService.java
@@ -37,7 +37,10 @@
 
 import java.io.IOException;
 import java.net.URISyntaxException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 
 /**
@@ -61,13 +64,9 @@
 	 * Constructs service class
 	 *
 	 * @param billingConfigurationAzure contains <code>billing-azure</code> module configuration
-	 * @param billableResources         resources that invoices should be calculated for
 	 */
-	public AzureInvoiceCalculationService(BillingConfigurationAzure billingConfigurationAzure,
-										  Set<AzureDlabBillableResource> billableResources) {
+	public AzureInvoiceCalculationService(BillingConfigurationAzure billingConfigurationAzure) {
 		this.billingConfigurationAzure = billingConfigurationAzure;
-		this.billableResources = billableResources.stream().collect(Collectors.toMap(AzureDlabBillableResource::getId,
-                e -> e));
 	}
 
 	/**
@@ -135,8 +134,6 @@
 
 		if (usageAggregateRecordList != null && !usageAggregateRecordList.isEmpty()) {
 			log.info("Processing {} usage records", usageAggregateRecordList.size());
-
-
 			usageAggregateRecordList = usageAggregateRecordList.stream().filter(e ->
 					matchProperStructure(e) && isBillableDlabResource(e))
 					.collect(Collectors.toList());
@@ -164,7 +161,6 @@
 	}
 
 	private boolean matchProperStructure(UsageAggregateRecord record) {
-
 		if (record.getProperties() == null) {
 			return false;
 		}
@@ -181,13 +177,10 @@
 
 	private boolean isBillableDlabResource(UsageAggregateRecord record) {
 		String dlabId = record.getProperties().getParsedInstanceData().getMicrosoftResources().getTags().get("Name");
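+		// Billable records are now matched by "Name" tag prefix (the service base name) instead of a precomputed resource map.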
-		return dlabId != null && !dlabId.isEmpty() && billableResources.containsKey(dlabId);
+		return dlabId != null && !dlabId.isEmpty() && dlabId.startsWith(billingConfigurationAzure.getSbn());
 	}
 
-	private AzureDailyResourceInvoice calculateInvoice(Map<String, Meter> rates, UsageAggregateRecord record,
-													   String dlabId) {
-
-		AzureDlabBillableResource azureDlabBillableResource = billableResources.get(dlabId);
+	private AzureDailyResourceInvoice calculateInvoice(Map<String, Meter> rates, UsageAggregateRecord record, String dlabId) {
 		String meterId = record.getProperties().getMeterId();
 		Meter rateCard = rates.get(meterId);
 
@@ -196,34 +189,25 @@
 			if (meterRates != null) {
 				Double rate = meterRates.get(AzureRateCardClient.MAIN_RATE_KEY);
 				if (rate != null) {
-
-					AzureDailyResourceInvoice azureDailyResourceInvoice = new AzureDailyResourceInvoice
-                            (azureDlabBillableResource);
-					azureDailyResourceInvoice.setUsageStartDate(record.getProperties().getUsageStartTime());
-					azureDailyResourceInvoice.setUsageEndDate(record.getProperties().getUsageEndTime());
-					azureDailyResourceInvoice.setMeterCategory(record.getProperties().getMeterCategory());
-					azureDailyResourceInvoice.setCost(
-							BillingCalculationUtils.round(rate * record.getProperties().getQuantity(), 2));
-					azureDailyResourceInvoice.setDay(getDay(record.getProperties().getUsageStartTime()));
-					azureDailyResourceInvoice.setCurrencyCode(billingConfigurationAzure.getCurrency());
-
-					log.trace("Generated invoice for azure resource {}", azureDailyResourceInvoice);
-
-					return azureDailyResourceInvoice;
-
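+					// Invoice cost = meter rate * usage quantity, rounded to 3 decimal places.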
+					return AzureDailyResourceInvoice.builder()
+							.dlabId(dlabId)
+							.usageStartDate(getDay(record.getProperties().getUsageStartTime()))
+							.usageEndDate(getDay(record.getProperties().getUsageEndTime()))
+							.meterCategory(record.getProperties().getMeterCategory())
+							.cost(BillingCalculationUtils.round(rate * record.getProperties().getQuantity(), 3))
+							.day(getDay(record.getProperties().getUsageStartTime()))
+							.currencyCode(billingConfigurationAzure.getCurrency())
+							.build();
 				} else {
-					log.error("Rate Card {} has no rate for meter id {} and rate id {}. Skip record {}. Azure resource" +
-                                    " {}",
-							rateCard, meterId, AzureRateCardClient.MAIN_RATE_KEY, record, azureDlabBillableResource);
+					log.error("Rate Card {} has no rate for meter id {} and rate id {}. Skip record {}.",
+							rateCard, meterId, AzureRateCardClient.MAIN_RATE_KEY, record);
 				}
 			} else {
-				log.error("Rate Card {} has no meter rates fro meter id {}. Skip record {}. Azure resource {}",
-						rateCard, meterId, record, azureDlabBillableResource);
+				log.error("Rate Card {} has no meter rates fro meter id {}. Skip record {}",
+						rateCard, meterId, record);
 			}
 		} else {
-			log.error("Meter rate {} form usage aggregate is not found in rate card. Skip record {}.  Azure resource " +
-                            "{}",
-					meterId, record, azureDlabBillableResource);
+			log.error("Meter rate {} form usage aggregate is not found in rate card. Skip record {}.", meterId, record);
 		}
 
 		return null;
@@ -231,7 +215,6 @@
 
 	private String getNewToken() {
 		try {
-
 			log.info("Requesting authentication token ... ");
 			ApplicationTokenCredentials applicationTokenCredentials = new ApplicationTokenCredentials(
 					billingConfigurationAzure.getClientId(),
@@ -247,7 +230,6 @@
 	}
 
 	private String getDay(String dateTime) {
-
 		if (dateTime != null) {
 			String[] parts = dateTime.split("T");
 			if (parts.length == 2) {
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/BillingAzureApplication.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/BillingAzureApplication.java
new file mode 100644
index 0000000..1a40767
--- /dev/null
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/BillingAzureApplication.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.billing.azure;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.data.mongodb.repository.config.EnableMongoRepositories;
+
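+// Spring Boot entry point for the standalone billing-azure service.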
+@SpringBootApplication
+@EnableMongoRepositories
+@EnableConfigurationProperties
+public class BillingAzureApplication {
+
+    public static void main(String[] args) {
+        SpringApplication.run(BillingAzureApplication.class, args);
+    }
+
+}
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/BillingSchedulerAzure.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/BillingSchedulerAzure.java
deleted file mode 100644
index d0b4dba..0000000
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/BillingSchedulerAzure.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.azure;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.billing.azure.config.AzureAuthFile;
-import com.epam.dlab.billing.azure.config.BillingConfigurationAzure;
-import com.epam.dlab.billing.azure.logging.AppenderConsole;
-import com.epam.dlab.billing.azure.logging.AppenderFile;
-import com.epam.dlab.billing.azure.model.AzureDailyResourceInvoice;
-import com.epam.dlab.billing.azure.model.AzureDlabBillableResource;
-import com.epam.dlab.billing.azure.model.BillingPeriod;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.exceptions.InitializationException;
-import com.epam.dlab.util.mongo.modules.IsoDateModule;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-import com.fasterxml.jackson.datatype.guava.GuavaModule;
-import com.mongodb.BasicDBObject;
-import com.mongodb.client.model.Filters;
-import com.mongodb.client.model.UpdateOptions;
-import com.mongodb.client.result.UpdateResult;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-@Slf4j
-public class BillingSchedulerAzure {
-	private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
-	private BillingConfigurationAzure billingConfigurationAzure;
-	private MongoDbBillingClient mongoDbBillingClient;
-
-	public BillingSchedulerAzure(String filePath) throws IOException, InitializationException {
-		try (FileInputStream fin = new FileInputStream(filePath)) {
-			final ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory()).registerModule(new GuavaModule());
-			objectMapper.registerSubtypes(AppenderFile.class, AppenderConsole.class);
-			this.billingConfigurationAzure = objectMapper.readValue(fin,
-							BillingConfigurationAzure.class);
-
-			Path path = Paths.get(billingConfigurationAzure.getAuthenticationFile());
-
-			if (path.toFile().exists()) {
-
-				log.info("Read and override configs using auth file");
-
-				try {
-					AzureAuthFile azureAuthFile = new ObjectMapper().readValue(path.toFile(), AzureAuthFile.class);
-					this.billingConfigurationAzure.setClientId(azureAuthFile.getClientId());
-					this.billingConfigurationAzure.setClientSecret(azureAuthFile.getClientSecret());
-					this.billingConfigurationAzure.setTenantId(azureAuthFile.getTenantId());
-					this.billingConfigurationAzure.setSubscriptionId(azureAuthFile.getSubscriptionId());
-				} catch (IOException e) {
-					log.error("Cannot read configuration file", e);
-					throw e;
-				}
-				log.info("Configs from auth file are used");
-			} else {
-				log.info("Configs from yml file are used");
-			}
-
-			this.mongoDbBillingClient = new MongoDbBillingClient
-					(billingConfigurationAzure.getAggregationOutputMongoDataSource().getHost(),
-							billingConfigurationAzure.getAggregationOutputMongoDataSource().getPort(),
-							billingConfigurationAzure.getAggregationOutputMongoDataSource().getDatabase(),
-							billingConfigurationAzure.getAggregationOutputMongoDataSource().getUsername(),
-							billingConfigurationAzure.getAggregationOutputMongoDataSource().getPassword());
-			this.billingConfigurationAzure.getLogging().configure();
-		}
-	}
-
-	public static void main(String[] args) throws Exception {
-		if (args != null && args.length == 2) {
-			BillingSchedulerAzure billingSchedulerAzure = new BillingSchedulerAzure(args[1]);
-			billingSchedulerAzure.start();
-
-		} else {
-			log.info("Wrong arguments. Please provide with path to billing configuration");
-		}
-	}
-
-	public void start() {
-		if (billingConfigurationAzure.isBillingEnabled()) {
-			executorService.scheduleWithFixedDelay(new CalculateBilling(billingConfigurationAzure,
-							mongoDbBillingClient), billingConfigurationAzure.getInitialDelay(),
-					billingConfigurationAzure.getPeriod(), TimeUnit.MINUTES);
-		} else {
-			log.info("======Billing is disabled======");
-		}
-	}
-
-	public void stop() {
-		try {
-			log.info("Stopping Azure billing scheduler");
-			if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) {
-				log.error("Force shut down");
-				executorService.shutdownNow();
-			}
-			mongoDbBillingClient.getClient().close();
-		} catch (InterruptedException e) {
-			executorService.shutdownNow();
-			mongoDbBillingClient.getClient().close();
-			Thread.currentThread().interrupt();
-		}
-	}
-
-
-	@Slf4j
-	private static class CalculateBilling implements Runnable {
-		private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormat.forPattern
-				("yyyy-MM-dd'T'HH:mm:ss" +
-						".SSS'Z");
-		private static final String SCHEDULER_ID = "azureBillingScheduler";
-		private AzureBillingDetailsService azureBillingDetailsService;
-		private BillingConfigurationAzure billingConfigurationAzure;
-		private MongoDbBillingClient client;
-		private ObjectMapper objectMapper = new ObjectMapper().registerModule(new IsoDateModule());
-
-
-		public CalculateBilling(BillingConfigurationAzure billingConfigurationAzure, MongoDbBillingClient client) {
-			this.billingConfigurationAzure = billingConfigurationAzure;
-			this.client = client;
-			this.azureBillingDetailsService = new AzureBillingDetailsService(client,
-					billingConfigurationAzure.getCurrency());
-		}
-
-		@Override
-		public void run() {
-			try {
-				BillingPeriod billingPeriod = getBillingPeriod();
-				DateTime currentTime = new DateTime().withZone(DateTimeZone.UTC);
-				if (billingPeriod == null) {
-					saveBillingPeriod(initialSchedulerInfo(currentTime));
-				} else {
-					log.info("Billing period from db is {}", billingPeriod);
-
-					if (shouldTriggerJobByTime(currentTime, billingPeriod)) {
-
-						boolean hasNew = run(billingPeriod);
-						if (hasNew) {
-							log.info("Updating billing details");
-							azureBillingDetailsService.updateBillingDetails();
-						}
-
-
-						updateBillingPeriod(billingPeriod, currentTime, hasNew);
-					}
-				}
-			} catch (RuntimeException e) {
-				log.error("Cannot update billing information", e);
-			}
-		}
-
-		private BillingPeriod initialSchedulerInfo(DateTime currentTime) {
-
-			BillingPeriod initialBillingPeriod = new BillingPeriod();
-			initialBillingPeriod.setFrom(currentTime.minusDays(2).toDateMidnight().toDate());
-			initialBillingPeriod.setTo(currentTime.toDateMidnight().toDate());
-
-			log.info("Initial scheduler info {}", initialBillingPeriod);
-
-			return initialBillingPeriod;
-
-		}
-
-		private boolean shouldTriggerJobByTime(DateTime currentTime, BillingPeriod billingPeriod) {
-
-			DateTime dateTimeToFromBillingPeriod = new DateTime(billingPeriod.getTo()).withZone(DateTimeZone.UTC);
-
-			log.info("Comparing current time[{}, {}] and from scheduler info [{}, {}]", currentTime,
-					currentTime.toDateMidnight(),
-					dateTimeToFromBillingPeriod, dateTimeToFromBillingPeriod.toDateMidnight());
-
-			if (currentTime.toDateMidnight().isAfter(dateTimeToFromBillingPeriod.toDateMidnight())
-					|| currentTime.toDateMidnight().isEqual(dateTimeToFromBillingPeriod.toDateMidnight())) {
-				log.info("Should trigger the job by time");
-				return true;
-			}
-
-			log.info("Should not trigger the job by time");
-			return false;
-		}
-
-		private boolean run(BillingPeriod billingPeriod) {
-
-			AzureBillableResourcesService azureBillableResourcesService = new AzureBillableResourcesService(client,
-					billingConfigurationAzure.getSharedStorageAccountTagName(),
-					billingConfigurationAzure.getSsnStorageAccountTagName(),
-					billingConfigurationAzure.getDatalakeTagName());
-			Set<AzureDlabBillableResource> billableResources = azureBillableResourcesService.getBillableResources();
-
-			AzureInvoiceCalculationService azureInvoiceCalculationService
-					= new AzureInvoiceCalculationService(billingConfigurationAzure, billableResources);
-
-			List<AzureDailyResourceInvoice> dailyInvoices = azureInvoiceCalculationService.generateInvoiceData(
-					DATE_TIME_FORMATTER.print(new DateTime(billingPeriod.getFrom()).withZone(DateTimeZone.UTC)),
-					DATE_TIME_FORMATTER.print(new DateTime(billingPeriod.getTo()).withZone(DateTimeZone.UTC)));
-
-
-			if (!dailyInvoices.isEmpty()) {
-
-				client.getDatabase().getCollection(MongoKeyWords.BILLING_DETAILS)
-						.insertMany(dailyInvoices.stream().map(AzureDailyResourceInvoice::to)
-								.collect(Collectors.toList()));
-
-				return true;
-
-			} else {
-				log.warn("Daily invoices is empty for period {}", billingPeriod);
-
-				return false;
-			}
-		}
-
-		private void updateBillingPeriod(BillingPeriod billingPeriod, DateTime currentTime, boolean updates) {
-
-			try {
-				client.getDatabase().getCollection(MongoKeyWords.AZURE_BILLING_SCHEDULER_HISTORY).insertOne(
-						Document.parse(objectMapper.writeValueAsString(billingPeriod)).append("updates", updates));
-				log.debug("History of billing periods is updated with {}",
-						objectMapper.writeValueAsString(billingPeriod));
-			} catch (JsonProcessingException e) {
-				log.error("Cannot update history of billing periods", e);
-
-			}
-
-			billingPeriod.setFrom(billingPeriod.getTo());
-
-			if (new DateTime(billingPeriod.getFrom()).withZone(DateTimeZone.UTC).toDateMidnight()
-					.isEqual(currentTime.toDateMidnight())) {
-
-				log.info("Setting billing to one day later");
-				billingPeriod.setTo(currentTime.plusDays(1).toDateMidnight().toDate());
-
-			} else {
-				billingPeriod.setTo(currentTime.toDateMidnight().toDate());
-			}
-
-			saveBillingPeriod(billingPeriod);
-		}
-
-		private boolean saveBillingPeriod(BillingPeriod billingPeriod) {
-			log.debug("Saving billing period {}", billingPeriod);
-
-			try {
-				UpdateResult updateResult = client.getDatabase().getCollection(MongoKeyWords.AZURE_BILLING_SCHEDULER)
-						.updateMany(Filters.eq(MongoKeyWords.MONGO_ID, SCHEDULER_ID),
-								new BasicDBObject("$set",
-										Document.parse(objectMapper.writeValueAsString(billingPeriod))
-												.append(MongoKeyWords.MONGO_ID, SCHEDULER_ID))
-								, new UpdateOptions().upsert(true)
-						);
-
-				log.debug("Billing period save operation result is {}", updateResult);
-				return true;
-			} catch (JsonProcessingException e) {
-				log.error("Cannot save billing period", e);
-			}
-
-			return false;
-		}
-
-		private BillingPeriod getBillingPeriod() {
-			log.debug("Get billing period");
-
-			try {
-				Document document = client.getDatabase().getCollection(MongoKeyWords.AZURE_BILLING_SCHEDULER)
-						.find(Filters.eq(MongoKeyWords.MONGO_ID, SCHEDULER_ID)).first();
-
-				log.debug("Retrieved billing period document {}", document);
-				if (document != null) {
-					return objectMapper.readValue(document.toJson(), BillingPeriod.class);
-				}
-
-				return null;
-
-			} catch (IOException e) {
-				log.error("Cannot save billing period", e);
-				throw new DlabException("Cannot parse string", e);
-			}
-		}
-	}
-}
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/CalculateBillingService.java
similarity index 80%
copy from integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
copy to services/billing-azure/src/main/java/com/epam/dlab/billing/azure/CalculateBillingService.java
index 1e49a60..d432337 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/CalculateBillingService.java
@@ -17,7 +17,12 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.docker;
+package com.epam.dlab.billing.azure;
 
-class Labels {
+import com.epam.dlab.dto.billing.BillingData;
+
+import java.util.List;
+
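+// Calculates billing report data on demand; with BillingController it replaces the removed BillingSchedulerAzure job.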
+public interface CalculateBillingService {
+    List<BillingData> getBillingData();
 }
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/CalculateBillingServiceImpl.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/CalculateBillingServiceImpl.java
new file mode 100644
index 0000000..3b3d60b
--- /dev/null
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/CalculateBillingServiceImpl.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.billing.azure;
+
+import com.epam.dlab.MongoKeyWords;
+import com.epam.dlab.billing.azure.config.AzureAuthFile;
+import com.epam.dlab.billing.azure.config.BillingConfigurationAzure;
+import com.epam.dlab.billing.azure.model.AzureDailyResourceInvoice;
+import com.epam.dlab.billing.azure.model.BillingPeriod;
+import com.epam.dlab.dto.billing.BillingData;
+import com.epam.dlab.exceptions.DlabException;
+import com.epam.dlab.util.mongo.modules.IsoDateModule;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.mongodb.BasicDBObject;
+import com.mongodb.client.model.Filters;
+import com.mongodb.client.model.UpdateOptions;
+import com.mongodb.client.result.UpdateResult;
+import lombok.extern.slf4j.Slf4j;
+import org.bson.Document;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.LocalDate;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+@Slf4j
+@Service
+public class CalculateBillingServiceImpl implements CalculateBillingService {
+    private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
+    private static final String SCHEDULER_ID = "azureBillingScheduler";
+    private final BillingConfigurationAzure billingConfigurationAzure;
+    private final MongoDbBillingClient mongoDbBillingClient;
+    private final ObjectMapper objectMapper;
+
+    @Autowired
+    public CalculateBillingServiceImpl(BillingConfigurationAzure configuration) throws IOException {
+        billingConfigurationAzure = configuration;
+        objectMapper = new ObjectMapper().registerModule(new IsoDateModule());
+        Path path = Paths.get(billingConfigurationAzure.getAuthenticationFile());
+
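+        // If an Azure auth file exists on disk, its client id/secret, tenant and subscription override the yml values.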
+        if (path.toFile().exists()) {
+            log.info("Read and override configs using auth file");
+            try {
+                AzureAuthFile azureAuthFile = new ObjectMapper().readValue(path.toFile(), AzureAuthFile.class);
+                this.billingConfigurationAzure.setClientId(azureAuthFile.getClientId());
+                this.billingConfigurationAzure.setClientSecret(azureAuthFile.getClientSecret());
+                this.billingConfigurationAzure.setTenantId(azureAuthFile.getTenantId());
+                this.billingConfigurationAzure.setSubscriptionId(azureAuthFile.getSubscriptionId());
+            } catch (IOException e) {
+                log.error("Cannot read configuration file", e);
+                throw e;
+            }
+            log.info("Configs from auth file are used");
+        } else {
+            log.info("Configs from yml file are used");
+        }
+
+        this.mongoDbBillingClient = new MongoDbBillingClient(
+                billingConfigurationAzure.getAggregationOutputMongoDataSource().getHost(),
+                        billingConfigurationAzure.getAggregationOutputMongoDataSource().getPort(),
+                        billingConfigurationAzure.getAggregationOutputMongoDataSource().getDatabase(),
+                        billingConfigurationAzure.getAggregationOutputMongoDataSource().getUsername(),
+                        billingConfigurationAzure.getAggregationOutputMongoDataSource().getPassword());
+    }
+
+    @Override
+    public List<BillingData> getBillingData() {
+        try {
+            BillingPeriod billingPeriod = getBillingPeriod();
+            DateTime currentTime = new DateTime().withZone(DateTimeZone.UTC);
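+            // First run: persist an initial two-day billing period; data is returned starting from the next trigger.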
+            if (billingPeriod == null) {
+                saveBillingPeriod(initialSchedulerInfo(currentTime));
+            } else {
+                log.info("Billing period from db is {}", billingPeriod);
+
+                if (shouldTriggerJobByTime(currentTime, billingPeriod)) {
+                    List<BillingData> billingData = getBillingData(billingPeriod);
+                    boolean hasNew = !billingData.isEmpty();
+                    updateBillingPeriod(billingPeriod, currentTime, hasNew);
+                    return billingData;
+                }
+            }
+        } catch (RuntimeException e) {
+            log.error("Cannot update billing information", e);
+        }
+        return Collections.emptyList();
+    }
+
+    private BillingPeriod initialSchedulerInfo(DateTime currentTime) {
+
+        BillingPeriod initialBillingPeriod = new BillingPeriod();
+        initialBillingPeriod.setFrom(currentTime.minusDays(2).toDateMidnight().toDate());
+        initialBillingPeriod.setTo(currentTime.toDateMidnight().toDate());
+
+        log.info("Initial scheduler info {}", initialBillingPeriod);
+
+        return initialBillingPeriod;
+
+    }
+
+    private boolean shouldTriggerJobByTime(DateTime currentTime, BillingPeriod billingPeriod) {
+
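+        // Fire when the current UTC date is on or after the end date of the stored billing period.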
+        DateTime dateTimeToFromBillingPeriod = new DateTime(billingPeriod.getTo()).withZone(DateTimeZone.UTC);
+
+        log.info("Comparing current time[{}, {}] and from scheduler info [{}, {}]", currentTime,
+                currentTime.toDateMidnight(),
+                dateTimeToFromBillingPeriod, dateTimeToFromBillingPeriod.toDateMidnight());
+
+        if (currentTime.toDateMidnight().isAfter(dateTimeToFromBillingPeriod.toDateMidnight())
+                || currentTime.toDateMidnight().isEqual(dateTimeToFromBillingPeriod.toDateMidnight())) {
+            log.info("Should trigger the job by time");
+            return true;
+        }
+
+        log.info("Should not trigger the job by time");
+        return false;
+    }
+
+    private List<BillingData> getBillingData(BillingPeriod billingPeriod) {
+        AzureInvoiceCalculationService azureInvoiceCalculationService
+                = new AzureInvoiceCalculationService(billingConfigurationAzure);
+
+        List<AzureDailyResourceInvoice> dailyInvoices = azureInvoiceCalculationService.generateInvoiceData(
+                DATE_TIME_FORMATTER.print(new DateTime(billingPeriod.getFrom()).withZone(DateTimeZone.UTC)),
+                DATE_TIME_FORMATTER.print(new DateTime(billingPeriod.getTo()).withZone(DateTimeZone.UTC)));
+
+        if (!dailyInvoices.isEmpty()) {
+            return dailyInvoices
+                    .stream()
+                    .map(this::toBillingData)
+                    .collect(Collectors.toList());
+        } else {
+            log.warn("Daily invoices is empty for period {}", billingPeriod);
+            return Collections.emptyList();
+        }
+    }
+
+    private void updateBillingPeriod(BillingPeriod billingPeriod, DateTime currentTime, boolean updates) {
+
+        try {
+            mongoDbBillingClient.getDatabase().getCollection(MongoKeyWords.AZURE_BILLING_SCHEDULER_HISTORY).insertOne(
+                    Document.parse(objectMapper.writeValueAsString(billingPeriod)).append("updates", updates));
+            log.debug("History of billing periods is updated with {}",
+                    objectMapper.writeValueAsString(billingPeriod));
+        } catch (JsonProcessingException e) {
+            log.error("Cannot update history of billing periods", e);
+
+        }
+
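+        // Slide the billing window forward: the next period starts where the previous one ended.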
+        billingPeriod.setFrom(billingPeriod.getTo());
+
+        if (new DateTime(billingPeriod.getFrom()).withZone(DateTimeZone.UTC).toDateMidnight()
+                .isEqual(currentTime.toDateMidnight())) {
+
+            log.info("Setting billing to one day later");
+            billingPeriod.setTo(currentTime.plusDays(1).toDateMidnight().toDate());
+
+        } else {
+            billingPeriod.setTo(currentTime.toDateMidnight().toDate());
+        }
+
+        saveBillingPeriod(billingPeriod);
+    }
+
+    private boolean saveBillingPeriod(BillingPeriod billingPeriod) {
+        log.debug("Saving billing period {}", billingPeriod);
+
+        try {
+            UpdateResult updateResult = mongoDbBillingClient.getDatabase().getCollection(MongoKeyWords.AZURE_BILLING_SCHEDULER)
+                    .updateMany(Filters.eq(MongoKeyWords.MONGO_ID, SCHEDULER_ID),
+                            new BasicDBObject("$set",
+                                    Document.parse(objectMapper.writeValueAsString(billingPeriod))
+                                            .append(MongoKeyWords.MONGO_ID, SCHEDULER_ID))
+                            , new UpdateOptions().upsert(true)
+                    );
+
+            log.debug("Billing period save operation result is {}", updateResult);
+            return true;
+        } catch (JsonProcessingException e) {
+            log.error("Cannot save billing period", e);
+        }
+
+        return false;
+    }
+
+    private BillingPeriod getBillingPeriod() {
+        log.debug("Get billing period");
+
+        try {
+            Document document = mongoDbBillingClient.getDatabase().getCollection(MongoKeyWords.AZURE_BILLING_SCHEDULER)
+                    .find(Filters.eq(MongoKeyWords.MONGO_ID, SCHEDULER_ID)).first();
+
+            log.debug("Retrieved billing period document {}", document);
+            if (document != null) {
+                return objectMapper.readValue(document.toJson(), BillingPeriod.class);
+            }
+
+            return null;
+
+        } catch (IOException e) {
+            log.error("Cannot save billing period", e);
+            throw new DlabException("Cannot parse string", e);
+        }
+    }
+
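+    // Maps an Azure daily invoice onto the provider-agnostic BillingData DTO (tag, usage dates, product, cost, currency).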
+    private BillingData toBillingData(AzureDailyResourceInvoice billingData) {
+        return BillingData.builder()
+                .tag(billingData.getDlabId().toLowerCase())
+                .usageDateFrom(Optional.ofNullable(billingData.getUsageStartDate()).map(LocalDate::parse).orElse(null))
+                .usageDateTo(Optional.ofNullable(billingData.getUsageEndDate()).map(LocalDate::parse).orElse(null))
+                .usageDate(billingData.getDay())
+                .product(billingData.getMeterCategory())
+                .cost(billingData.getCost())
+                .currency(billingData.getCurrencyCode())
+                .build();
+    }
+}
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/config/BillingConfigurationAzure.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/config/BillingConfigurationAzure.java
index 4bd69ce..0a28828 100644
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/config/BillingConfigurationAzure.java
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/config/BillingConfigurationAzure.java
@@ -20,9 +20,14 @@
 package com.epam.dlab.billing.azure.config;
 
 import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Configuration;
 
+@Configuration
+@ConfigurationProperties("dlab")
 @Data
 public class BillingConfigurationAzure {
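+    // Service base name: tag prefix used to recognize DLab-created Azure resources in usage records.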
+    private String sbn;
     private long initialDelay;
     private long period;
 
@@ -43,6 +48,5 @@
     private String sharedStorageAccountTagName;
     private String datalakeTagName;
 
-    private LoggingConfigurationFactory logging;
     private AggregationOutputMongoDataSource aggregationOutputMongoDataSource;
 }
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/config/SecurityConfig.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/config/SecurityConfig.java
new file mode 100644
index 0000000..35e341c
--- /dev/null
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/config/SecurityConfig.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.billing.azure.config;
+
+import org.keycloak.adapters.KeycloakConfigResolver;
+import org.keycloak.adapters.springboot.KeycloakSpringBootConfigResolver;
+import org.keycloak.adapters.springsecurity.KeycloakConfiguration;
+import org.keycloak.adapters.springsecurity.authentication.KeycloakAuthenticationProvider;
+import org.keycloak.adapters.springsecurity.config.KeycloakWebSecurityConfigurerAdapter;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.annotation.Bean;
+import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
+import org.springframework.security.config.annotation.web.builders.HttpSecurity;
+import org.springframework.security.core.authority.mapping.SimpleAuthorityMapper;
+import org.springframework.security.core.session.SessionRegistryImpl;
+import org.springframework.security.web.authentication.session.RegisterSessionAuthenticationStrategy;
+import org.springframework.security.web.authentication.session.SessionAuthenticationStrategy;
+
+@KeycloakConfiguration
+class SecurityConfig extends KeycloakWebSecurityConfigurerAdapter {
+
+    @Autowired
+    public void configureGlobal(AuthenticationManagerBuilder auth) {
+        KeycloakAuthenticationProvider keycloakAuthenticationProvider = keycloakAuthenticationProvider();
+        keycloakAuthenticationProvider.setGrantedAuthoritiesMapper(new SimpleAuthorityMapper());
+        auth.authenticationProvider(keycloakAuthenticationProvider);
+    }
+
+    @Bean
+    public KeycloakConfigResolver keycloakConfigResolver() {
+        return new KeycloakSpringBootConfigResolver();
+    }
+
+    @Bean
+    @Override
+    protected SessionAuthenticationStrategy sessionAuthenticationStrategy() {
+        return new RegisterSessionAuthenticationStrategy(new SessionRegistryImpl());
+    }
+
+    @Override
+    protected void configure(HttpSecurity http) throws Exception {
+        super.configure(http);
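+        // Every request must be authenticated through Keycloak; anonymous access is disabled.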
+        http
+                .anonymous().disable()
+                .authorizeRequests()
+                .anyRequest()
+                .authenticated();
+    }
+}
\ No newline at end of file
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/controller/BillingController.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/controller/BillingController.java
new file mode 100644
index 0000000..9018791
--- /dev/null
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/controller/BillingController.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.billing.azure.controller;
+
+import com.epam.dlab.billing.azure.CalculateBillingService;
+import com.epam.dlab.dto.billing.BillingData;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+import java.util.List;
+
+@RestController
+public class BillingController {
+
+    private final CalculateBillingService billingService;
+
+    public BillingController(CalculateBillingService billingService) {
+        this.billingService = billingService;
+    }
+
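+    // Served under the /api/billing context path configured in application.yml,
+    // e.g. GET https://<host>:8088/api/billing.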
+    @GetMapping
+    public ResponseEntity<List<BillingData>> getBilling() {
+        return new ResponseEntity<>(billingService.getBillingData(), HttpStatus.OK);
+    }
+}
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/model/AzureDailyResourceInvoice.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/model/AzureDailyResourceInvoice.java
index ff132a2..486ddd5 100644
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/model/AzureDailyResourceInvoice.java
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/model/AzureDailyResourceInvoice.java
@@ -19,35 +19,21 @@
 
 package com.epam.dlab.billing.azure.model;
 
-import com.epam.dlab.billing.DlabResourceType;
 import com.epam.dlab.billing.azure.MongoDocument;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Builder;
 import lombok.Data;
 import lombok.EqualsAndHashCode;
-import lombok.NoArgsConstructor;
 
 @Data
-@NoArgsConstructor
+@Builder
 @EqualsAndHashCode(callSuper = true)
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class AzureDailyResourceInvoice extends MongoDocument<AzureDailyResourceInvoice> {
 	@JsonProperty
 	private String dlabId;
 	@JsonProperty
-	private String user;
-	@JsonProperty
-	private String project;
-	@JsonProperty
-	private String exploratoryId;
-	@JsonProperty
-	private String computationalId;
-	@JsonProperty
-	private DlabResourceType resourceType;
-	@JsonProperty
-	private String resourceName;
-	@JsonProperty
 	private String meterCategory;
 	@JsonProperty
 	private String usageStartDate;
@@ -59,22 +45,4 @@
 	private double cost;
 	@JsonProperty
 	private String currencyCode;
-
-	@Builder
-	public AzureDailyResourceInvoice(AzureDlabBillableResource azureDlabBillableResource) {
-		this.dlabId = azureDlabBillableResource.getId();
-		this.user = azureDlabBillableResource.getUser();
-		this.project = azureDlabBillableResource.getProject();
-		this.resourceType = azureDlabBillableResource.getType();
-		this.resourceName = azureDlabBillableResource.getResourceName();
-
-		if (resourceType == DlabResourceType.EXPLORATORY) {
-			this.exploratoryId = azureDlabBillableResource.getId();
-		} else if (resourceType == DlabResourceType.COMPUTATIONAL) {
-			this.computationalId = azureDlabBillableResource.getId();
-			this.exploratoryId = azureDlabBillableResource.getNotebookId();
-		} else if (resourceType == DlabResourceType.VOLUME) {
-			this.exploratoryId = azureDlabBillableResource.getNotebookId();
-		}
-	}
 }
diff --git a/services/billing-azure/src/main/resources/application.yml b/services/billing-azure/src/main/resources/application.yml
new file mode 100644
index 0000000..482a78d
--- /dev/null
+++ b/services/billing-azure/src/main/resources/application.yml
@@ -0,0 +1,80 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+spring:
+  main:
+    allow-bean-definition-overriding: true
+  data:
+    mongodb:
+      username: admin
+      password: admin
+      database: dlabdb
+      port: 27017
+      host: localhost
+
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
+
+server.ssl.key-store-type: JKS
+server.ssl.key-store: <KEY_STORE_PATH>
+server.ssl.key-store-password: KEYSTORE_PASSWORD
+server.ssl.key-alias: billing
+
+logging:
+  file: /var/opt/dlab/log/ssn/billing.log
+  level:
+    com:
+      epam: trace
+
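+# Keycloak adapter settings. bearer-only means this service only validates
+# incoming tokens and never initiates a login flow itself.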
+keycloak:
+  bearer-only: true
+  realm: <KEYCLOAK_REALM_NAME>
+  resource: <KEYCLOAK_CLIENT_ID>
+  credentials.secret: <KEYCLOAK_CLIENT_SECRET>
+  ssl-required: none
+  auth-server-url: <KEYCLOAK_AUTH_SERVER_URL>
+
+dlab:
+  sbn: <CONF_SERVICE_BASE_NAME>
+  billingEnabled: true
+  clientId: <CLIENT_ID>
+  clientSecret: <CLIENT_SECRET>
+  tenantId: <TENANT_ID>
+  subscriptionId: <SUBSCRIPTION_ID>
+  authenticationFile: <AUTHENTICATION_FILE>
+  # Billing configuration for RateCard API. For more details please see https://msdn.microsoft.com/en-us/library/mt219004.aspx
+  offerNumber: <OFFER_NUMBER>
+  currency: <CURRENCY>
+  locale: <LOCALE>
+  regionInfo: <REGION_INFO>
+  initialDelay: 1
+  period: 60
+  aggregationOutputMongoDataSource:
+    host: localhost
+    port: 27017
+    username: admin
+    password: <MONGODB_PASSWORD>
+    database: dlabdb
+  ssnStorageAccountTagName: <AZURE_SSN_STORAGE_ACCOUNT_TAG>
+  sharedStorageAccountTagName: <AZURE_SHARED_STORAGE_ACCOUNT_TAG>
+  datalakeTagName: <AZURE_DATALAKE_TAG>
\ No newline at end of file
diff --git a/services/billing-gcp/Dockerfile b/services/billing-gcp/Dockerfile
index cd23ea7..c4e6733 100644
--- a/services/billing-gcp/Dockerfile
+++ b/services/billing-gcp/Dockerfile
@@ -23,6 +23,6 @@
 
 USER root
 
-COPY billing-gcp-2.1.jar /root/
+COPY billing-gcp-2.2.jar /root/
 
-CMD java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 /root/billing-gcp-2.1.jar  --spring.config.location=/root/billing.yml
\ No newline at end of file
+CMD java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 /root/billing-gcp-2.2.jar  --spring.config.location=/root/billing.yml
\ No newline at end of file
diff --git a/services/billing-gcp/billing.yml b/services/billing-gcp/billing.yml
index 8ee8e4d..ecdcc45 100644
--- a/services/billing-gcp/billing.yml
+++ b/services/billing-gcp/billing.yml
@@ -1,4 +1,4 @@
-# ******************************************************************************
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -19,23 +19,41 @@
 #
 # ******************************************************************************
 
-server:
-  port: 8088
 spring:
+  main:
+    allow-bean-definition-overriding: true
   data:
     mongodb:
       username: admin
-      password: <MONGO_PASSWORD>
+      password: MONGO_PASSWORD
       database: dlabdb
       port: 27017
-      host: localhost
+      host: MONGO_HOST
 dlab:
-  sbn: <CONF_SERVICE_BASE_NAME>
-  bigQueryDataset: <BILLING_DATASET_NAME>
-  cron: 0 0 */1 * * *
+  sbn: SERVICE_BASE_NAME
+  bigQueryDataset: DATASET_NAME
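+  # Spring cron format (sec min hour day-of-month month day-of-week):
+  # "0 0 * * * *" fires at the top of every hour.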
+  cron: 0 0 * * * *
+
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
+
+server.ssl.key-store-type: JKS
+server.ssl.key-store: /home/OS_USER/keys/ssn.keystore.jks
+server.ssl.key-store-password: KEY_STORE_PASSWORD
+server.ssl.key-alias: ssn
 
 logging:
   file: /var/opt/dlab/log/ssn/billing.log
   level:
     com:
-      epam: trace
\ No newline at end of file
+      epam: trace
+
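+# Keycloak adapter settings. bearer-only means this service only validates
+# incoming tokens and never initiates a login flow itself.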
+keycloak:
+  bearer-only: true
+  realm: dlab
+  resource: KEYCLOAK_CLIENT_ID
+  credentials.secret: KEYCLOAK_CLIENT_SECRET
+  ssl-required: none
+  auth-server-url: KEYCLOAK_AUTH_SERVER_URL
\ No newline at end of file
diff --git a/services/billing-gcp/pom.xml b/services/billing-gcp/pom.xml
index 114b25d..43dff3b 100644
--- a/services/billing-gcp/pom.xml
+++ b/services/billing-gcp/pom.xml
@@ -39,6 +39,13 @@
                 <type>pom</type>
                 <scope>import</scope>
             </dependency>
+            <dependency>
+                <groupId>org.keycloak.bom</groupId>
+                <artifactId>keycloak-adapter-bom</artifactId>
+                <version>4.8.3.Final</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
         </dependencies>
     </dependencyManagement>
 
@@ -57,6 +64,19 @@
             <artifactId>spring-boot-starter-web</artifactId>
         </dependency>
         <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-security</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.keycloak</groupId>
+            <artifactId>keycloak-spring-boot-starter</artifactId>
+        </dependency>
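+        <!-- Explicit version matches the keycloak-adapter-bom (4.8.3.Final) imported above. -->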
+        <dependency>
+            <groupId>org.keycloak</groupId>
+            <artifactId>keycloak-spring-security-adapter</artifactId>
+            <version>4.8.3.Final</version>
+        </dependency>
+        <dependency>
             <groupId>org.springframework</groupId>
             <artifactId>spring-test</artifactId>
             <scope>test</scope>
@@ -73,6 +93,16 @@
             <version>${org.mockito.version}</version>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>com.epam.dlab</groupId>
+            <artifactId>dlab-model</artifactId>
+            <version>${project.parent.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <version>3.7</version>
+        </dependency>
     </dependencies>
 
     <build>
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/BillingGcpApplication.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/BillingGcpApplication.java
index 0e31323..c454038 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/BillingGcpApplication.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/BillingGcpApplication.java
@@ -23,10 +23,8 @@
 import org.springframework.boot.autoconfigure.SpringBootApplication;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.data.mongodb.repository.config.EnableMongoRepositories;
-import org.springframework.scheduling.annotation.EnableScheduling;
 
 @SpringBootApplication
-@EnableScheduling
 @EnableMongoRepositories
 @EnableConfigurationProperties
 public class BillingGcpApplication {
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/conf/BillingApplicationConfiguration.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/conf/BillingApplicationConfiguration.java
index f565c6f..79c1d9e 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/conf/BillingApplicationConfiguration.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/conf/BillingApplicationConfiguration.java
@@ -27,10 +27,8 @@
 @Configuration
 public class BillingApplicationConfiguration {
 
-
     @Bean
     public BigQuery bigQueryService() {
         return BigQueryOptions.getDefaultInstance().getService();
     }
-
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/conf/SecurityConfig.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/conf/SecurityConfig.java
new file mode 100644
index 0000000..ad960b0
--- /dev/null
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/conf/SecurityConfig.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.billing.gcp.conf;
+
+import org.keycloak.adapters.KeycloakConfigResolver;
+import org.keycloak.adapters.springboot.KeycloakSpringBootConfigResolver;
+import org.keycloak.adapters.springsecurity.KeycloakConfiguration;
+import org.keycloak.adapters.springsecurity.authentication.KeycloakAuthenticationProvider;
+import org.keycloak.adapters.springsecurity.config.KeycloakWebSecurityConfigurerAdapter;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.annotation.Bean;
+import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
+import org.springframework.security.config.annotation.web.builders.HttpSecurity;
+import org.springframework.security.core.authority.mapping.SimpleAuthorityMapper;
+import org.springframework.security.core.session.SessionRegistryImpl;
+import org.springframework.security.web.authentication.session.RegisterSessionAuthenticationStrategy;
+import org.springframework.security.web.authentication.session.SessionAuthenticationStrategy;
+
+@KeycloakConfiguration
+class SecurityConfig extends KeycloakWebSecurityConfigurerAdapter {
+
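+    // Register the Keycloak authentication provider; SimpleAuthorityMapper
+    // normalizes Keycloak roles to Spring Security's ROLE_ convention.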
+    @Autowired
+    public void configureGlobal(AuthenticationManagerBuilder auth) {
+        KeycloakAuthenticationProvider keycloakAuthenticationProvider = keycloakAuthenticationProvider();
+        keycloakAuthenticationProvider.setGrantedAuthoritiesMapper(new SimpleAuthorityMapper());
+        auth.authenticationProvider(keycloakAuthenticationProvider);
+    }
+
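+    // Resolve the adapter configuration from Spring Boot properties
+    // (application.yml) instead of a separate keycloak.json file.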
+    @Bean
+    public KeycloakConfigResolver keycloakConfigResolver() {
+        return new KeycloakSpringBootConfigResolver();
+    }
+
+    @Bean
+    @Override
+    protected SessionAuthenticationStrategy sessionAuthenticationStrategy() {
+        return new RegisterSessionAuthenticationStrategy(new SessionRegistryImpl());
+    }
+
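+    // This service is bearer-only: anonymous access is disabled and every
+    // request must present a valid access token.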
+    @Override
+    protected void configure(HttpSecurity http) throws Exception {
+        super.configure(http);
+        http
+                .anonymous().disable()
+                .authorizeRequests()
+                .anyRequest()
+                .authenticated();
+    }
+}
\ No newline at end of file
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/controller/BillingController.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/controller/BillingController.java
new file mode 100644
index 0000000..ea45d89
--- /dev/null
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/controller/BillingController.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.billing.gcp.controller;
+
+import com.epam.dlab.billing.gcp.service.BillingService;
+import com.epam.dlab.dto.billing.BillingData;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+import java.util.List;
+
+@RestController
+public class BillingController {
+
+    private final BillingService billingService;
+
+    public BillingController(BillingService billingService) {
+        this.billingService = billingService;
+    }
+
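+    // Returns the billing records collected from BigQuery; served under the
+    // /api/billing context path configured in application.yml.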
+    @GetMapping
+    public ResponseEntity<List<BillingData>> getBilling() {
+        return new ResponseEntity<>(billingService.getBillingData(), HttpStatus.OK);
+    }
+}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java
index 7e6b0b7..7c791df 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java
@@ -19,11 +19,10 @@
 
 package com.epam.dlab.billing.gcp.dao;
 
-import com.epam.dlab.billing.gcp.model.GcpBillingData;
+import com.epam.dlab.dto.billing.BillingData;
 
 import java.util.List;
 
 public interface BillingDAO {
-
-    List<GcpBillingData> getBillingData() throws InterruptedException;
+    List<BillingData> getBillingData() throws InterruptedException;
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java
index b0ece02..061283d 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java
@@ -22,16 +22,23 @@
 import com.epam.dlab.billing.gcp.conf.DlabConfiguration;
 import com.epam.dlab.billing.gcp.dao.BillingDAO;
 import com.epam.dlab.billing.gcp.model.BillingHistory;
-import com.epam.dlab.billing.gcp.model.GcpBillingData;
 import com.epam.dlab.billing.gcp.repository.BillingHistoryRepository;
-import com.google.cloud.bigquery.*;
+import com.epam.dlab.dto.billing.BillingData;
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.FieldValueList;
+import com.google.cloud.bigquery.QueryJobConfiguration;
+import com.google.cloud.bigquery.QueryParameterValue;
+import com.google.cloud.bigquery.Table;
+import com.google.cloud.bigquery.TableInfo;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.data.mongodb.core.MongoTemplate;
 import org.springframework.stereotype.Component;
 
 import java.time.Instant;
 import java.time.LocalDate;
 import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -41,10 +48,14 @@
 @Component
 @Slf4j
 public class BigQueryBillingDAO implements BillingDAO {
-
+	private static final String DATE_FORMAT = "yyyy-MM-dd";
 	private static final String SBN_PARAM = "sbn";
 	private static final String DATASET_PARAM = "dataset";
+
 	private final BillingHistoryRepository billingHistoryRepo;
+	private final MongoTemplate mongoTemplate;
+	private final BigQuery service;
+	private final String dataset;
 	private final String sbn;
 
 	private static final String GET_BILLING_DATA_QUERY = "SELECT b.sku.description usageType," +
@@ -55,19 +66,19 @@
 			"CROSS JOIN UNNEST(b.labels) as label\n" +
 			"where label.key = 'name' and cost != 0 and label.value like @sbn\n" +
 			"group by usageType, usage_date_from, usage_date_to, product, value, currency";
-	private final BigQuery service;
-	private final String dataset;
 
 	@Autowired
-	public BigQueryBillingDAO(DlabConfiguration conf, BigQuery service, BillingHistoryRepository billingHistoryRepo) {
+	public BigQueryBillingDAO(DlabConfiguration conf, BillingHistoryRepository billingHistoryRepo,
+							  BigQuery service, MongoTemplate mongoTemplate) {
 		dataset = conf.getBigQueryDataset();
-		sbn = conf.getSbn();
 		this.service = service;
 		this.billingHistoryRepo = billingHistoryRepo;
+		this.mongoTemplate = mongoTemplate;
+		sbn = conf.getSbn();
 	}
 
 	@Override
-	public List<GcpBillingData> getBillingData() {
+	public List<BillingData> getBillingData() {
 		final Map<String, Long> processedBillingTables = billingHistoryRepo.findAll()
 				.stream()
 				.collect(Collectors.toMap(BillingHistory::getTableName, BillingHistory::getLastModified));
@@ -82,7 +93,7 @@
 				.collect(Collectors.toList());
 	}
 
-	private Stream<? extends GcpBillingData> bigQueryResultSetStream(Table table) {
+	private Stream<? extends BillingData> bigQueryResultSetStream(Table table) {
 		try {
 			final String tableName = table.getTableId().getTable();
 			final String tableId = table.getTableId().getDataset() + "." + tableName;
@@ -91,9 +102,9 @@
 					.addNamedParameter(SBN_PARAM, QueryParameterValue.string(sbn + "%"))
 					.addNamedParameter(DATASET_PARAM, QueryParameterValue.string(tableId))
 					.build();
-			final Stream<GcpBillingData> gcpBillingDataStream =
+			final Stream<BillingData> gcpBillingDataStream =
 					StreamSupport.stream(service.query(queryConfig).getValues().spliterator(), false)
-							.map(this::toBillingData);
+							.map(this::toGcpBillingData);
 			billingHistoryRepo.save(new BillingHistory(tableName, table.getLastModifiedTime()));
 			return gcpBillingDataStream;
 		} catch (InterruptedException e) {
@@ -101,16 +112,17 @@
 		}
 	}
 
-	private GcpBillingData toBillingData(FieldValueList fields) {
-
-		return GcpBillingData.builder()
+	private BillingData toGcpBillingData(FieldValueList fields) {
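+		// Map a BigQuery result row onto the shared BillingData DTO: cost as a
+		// double, usage date rendered as yyyy-MM-dd, and the tag lowercased.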
+		return BillingData.builder()
 				.usageDateFrom(toLocalDate(fields, "usage_date_from"))
 				.usageDateTo(toLocalDate(fields, "usage_date_to"))
-				.cost(fields.get("cost").getNumericValue())
+				.cost(fields.get("cost").getNumericValue().doubleValue())
 				.product(fields.get("product").getStringValue())
 				.usageType(fields.get("usageType").getStringValue())
 				.currency(fields.get("currency").getStringValue())
-				.tag(fields.get("value").getStringValue()).build();
+				.tag(fields.get("value").getStringValue().toLowerCase())
+				.usageDate(toLocalDate(fields, "usage_date_from").format((DateTimeFormatter.ofPattern(DATE_FORMAT))))
+				.build();
 	}
 
 	private LocalDate toLocalDate(FieldValueList fieldValues, String timestampFieldName) {
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/UserInstance.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/UserInstance.java
deleted file mode 100644
index b5a61ba..0000000
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/UserInstance.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.gcp.documents;
-
-import com.epam.dlab.billing.gcp.model.BillingData;
-import lombok.Data;
-import org.springframework.data.annotation.Id;
-import org.springframework.data.mongodb.core.mapping.Document;
-import org.springframework.data.mongodb.core.mapping.Field;
-
-import java.util.List;
-
-@Document(collection = "userInstances")
-@Data
-public class UserInstance {
-
-    @Id
-    private String id;
-    @Field("user")
-    private String user;
-    @Field("exploratory_name")
-    private String exploratoryName;
-    @Field("exploratory_id")
-    private String exploratoryId;
-    private String project;
-    private List<BillingData> billing;
-    private String cost;
-    @Field("computational_resources")
-    private List<ComputationalResource> computationalResources;
-
-    @Data
-    public class ComputationalResource {
-        @Field("computational_name")
-        private String computationalName;
-        @Field("computational_id")
-        private String computationalId;
-    }
-}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/BillingHistory.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/BillingHistory.java
index 8fb80f1..a232ecc 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/BillingHistory.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/BillingHistory.java
@@ -26,7 +26,7 @@
 @Data
 @AllArgsConstructor
 public class BillingHistory {
-	@Id
-	private String tableName;
-	private final long lastModified;
+    @Id
+    private String tableName;
+    private final long lastModified;
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java
index a11dcce..a2bd12b 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java
@@ -21,18 +21,24 @@
 
 import lombok.Builder;
 import lombok.Data;
+import org.springframework.data.mongodb.core.mapping.Document;
+import org.springframework.data.mongodb.core.mapping.Field;
 
-import java.math.BigDecimal;
 import java.time.LocalDate;
 
 @Data
 @Builder
+@Document(collection = "billing")
 public class GcpBillingData {
+    @Field("from")
     private final LocalDate usageDateFrom;
+    @Field("to")
     private final LocalDate usageDateTo;
     private final String product;
     private final String usageType;
-    private final BigDecimal cost;
+    private final Double cost;
     private final String currency;
+    @Field("dlabId")
     private final String tag;
+    private final String usageDate;
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/BillingRepository.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/BillingRepository.java
index 9dbfe98..2d4c5c1 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/BillingRepository.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/BillingRepository.java
@@ -19,9 +19,9 @@
 
 package com.epam.dlab.billing.gcp.repository;
 
-import com.epam.dlab.billing.gcp.model.BillingData;
+import com.epam.dlab.billing.gcp.model.GcpBillingData;
 import org.springframework.data.mongodb.repository.MongoRepository;
 
-public interface BillingRepository extends MongoRepository<BillingData, String> {
+public interface BillingRepository extends MongoRepository<GcpBillingData, String> {
 	void deleteByUsageDateRegex(String usageDateRegex);
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/ProjectRepository.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/ProjectRepository.java
deleted file mode 100644
index 955c6a2..0000000
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/ProjectRepository.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.gcp.repository;
-
-import com.epam.dlab.billing.gcp.documents.Project;
-import org.springframework.data.mongodb.repository.MongoRepository;
-
-public interface ProjectRepository extends MongoRepository<Project, String> {
-}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/UserInstanceRepository.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/UserInstanceRepository.java
deleted file mode 100644
index a95d033..0000000
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/repository/UserInstanceRepository.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.gcp.repository;
-
-import com.epam.dlab.billing.gcp.documents.UserInstance;
-import org.springframework.data.mongodb.repository.MongoRepository;
-
-import java.util.Optional;
-
-public interface UserInstanceRepository extends MongoRepository<UserInstance, String> {
-
-    Optional<UserInstance> findByUserAndExploratoryName(String user, String exploratoryName);
-}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/scheduler/BillingScheduler.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/scheduler/BillingScheduler.java
deleted file mode 100644
index 9724d43..0000000
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/scheduler/BillingScheduler.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.gcp.scheduler;
-
-import com.epam.dlab.billing.gcp.service.BillingService;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.scheduling.annotation.Scheduled;
-import org.springframework.stereotype.Component;
-
-@Component
-public class BillingScheduler {
-
-	private final BillingService billingService;
-
-	@Autowired
-	public BillingScheduler(BillingService billingService) {
-		this.billingService = billingService;
-	}
-
-
-	@Scheduled(cron = "${dlab.cron}")
-	public void getBillingReport() {
-		billingService.updateBillingData();
-	}
-}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/BillingService.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/BillingService.java
index 71015aa..7bb3246 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/BillingService.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/BillingService.java
@@ -19,7 +19,10 @@
 
 package com.epam.dlab.billing.gcp.service;
 
-public interface BillingService {
+import com.epam.dlab.dto.billing.BillingData;
 
-    void updateBillingData();
+import java.util.List;
+
+public interface BillingService {
+    List<BillingData> getBillingData();
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/BillingServiceImpl.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/BillingServiceImpl.java
deleted file mode 100644
index e2ae138..0000000
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/BillingServiceImpl.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.gcp.service;
-
-import com.epam.dlab.billing.gcp.dao.BillingDAO;
-import com.epam.dlab.billing.gcp.documents.Project;
-import com.epam.dlab.billing.gcp.documents.UserInstance;
-import com.epam.dlab.billing.gcp.model.BillingData;
-import com.epam.dlab.billing.gcp.model.GcpBillingData;
-import com.epam.dlab.billing.gcp.repository.BillingRepository;
-import com.epam.dlab.billing.gcp.repository.ProjectRepository;
-import com.epam.dlab.billing.gcp.repository.UserInstanceRepository;
-import com.epam.dlab.billing.gcp.util.BillingUtils;
-import lombok.extern.slf4j.Slf4j;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.data.mongodb.core.MongoTemplate;
-import org.springframework.data.mongodb.core.query.Query;
-import org.springframework.data.mongodb.core.query.Update;
-import org.springframework.stereotype.Service;
-
-import java.math.BigDecimal;
-import java.time.format.DateTimeFormatter;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.stream.Collector;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static com.epam.dlab.billing.gcp.util.BillingUtils.edgeBillingDataStream;
-import static org.springframework.data.mongodb.core.query.Criteria.where;
-
-@Service
-@Slf4j
-public class BillingServiceImpl implements BillingService {
-
-	private static final String DATE_FORMAT = "yyyy-MM-dd";
-	private static final String USAGE_DATE_FORMAT = "yyyy-MM";
-	private final BillingDAO billingDAO;
-	private final ProjectRepository projectRepository;
-	private final UserInstanceRepository userInstanceRepository;
-	private final BillingRepository billingRepository;
-	private final MongoTemplate mongoTemplate;
-	@Value("${dlab.sbn}")
-	private String sbn;
-
-	@Autowired
-	public BillingServiceImpl(BillingDAO billingDAO, ProjectRepository projectRepository,
-							  UserInstanceRepository userInstanceRepository, BillingRepository billingRepository,
-							  MongoTemplate mongoTemplate) {
-		this.billingDAO = billingDAO;
-		this.projectRepository = projectRepository;
-		this.userInstanceRepository = userInstanceRepository;
-		this.billingRepository = billingRepository;
-		this.mongoTemplate = mongoTemplate;
-	}
-
-	@Override
-	public void updateBillingData() {
-		try {
-
-			final Stream<BillingData> ssnBillingDataStream = BillingUtils.ssnBillingDataStream(sbn);
-			final Stream<BillingData> billableUserInstances = userInstanceRepository.findAll()
-					.stream()
-					.filter(userInstance -> userInstance.getExploratoryId() != null)
-					.flatMap(BillingUtils::exploratoryBillingDataStream);
-
-			final Stream<BillingData> billableEdges = projectRepository.findAll()
-					.stream()
-					.collect(Collectors.toMap(Project::getName, Project::getEndpoints))
-					.entrySet()
-					.stream()
-					.flatMap(e -> projectEdges(e.getKey(), e.getValue()));
-
-
-			final Map<String, BillingData> billableResources = Stream.of(billableUserInstances, billableEdges,
-					ssnBillingDataStream)
-					.flatMap(s -> s)
-					.filter(bd -> bd.getDlabId() != null)
-					.collect(Collectors.toMap(BillingData::getDlabId, b -> b));
-			log.info("Billable resources are: {}", billableResources);
-			final Map<String, List<BillingData>> billingDataMap = billingDAO.getBillingData()
-					.stream()
-					.map(bd -> toBillingData(bd, getOrDefault(billableResources, bd.getTag())))
-					.collect(Collectors.groupingBy(bd -> bd.getUsageDate().substring(0,
-							USAGE_DATE_FORMAT.length())));
-
-			billingDataMap.forEach((usageDate, billingDataList) -> {
-				log.info("Updating billing information for month {}", usageDate);
-				billingRepository.deleteByUsageDateRegex("^" + usageDate);
-				billingRepository.insert(billingDataList);
-				updateExploratoryCost(billingDataList);
-			});
-
-			log.info("Finished updating billing data");
-
-
-		} catch (Exception e) {
-			log.error("Can not update billing due to: {}", e.getMessage(), e);
-		}
-	}
-
-	private Stream<BillingData> projectEdges(String projectName, List<Project.Endpoint> endpoints) {
-		return endpoints
-				.stream()
-				.flatMap(endpoint -> edgeBillingDataStream(projectName.toLowerCase(), sbn, endpoint.getName()));
-	}
-
-	private BillingData getOrDefault(Map<String, BillingData> billableResources, String tag) {
-		return billableResources.getOrDefault(tag, BillingData.builder().dlabId(tag).build());
-	}
-
-	private void updateExploratoryCost(List<BillingData> billingDataList) {
-		billingDataList.stream()
-				.filter(this::userAndExploratoryNamePresent)
-				.collect(groupByUserNameExploratoryNameCollector())
-				.forEach(this::updateUserExploratoryBillingData);
-	}
-
-	private void updateUserExploratoryBillingData(String user,
-												  Map<String, List<BillingData>> billableExploratoriesMap) {
-		billableExploratoriesMap.forEach((exploratoryName, billingInfoList) ->
-				updateExploratoryBillingData(user, exploratoryName, billingInfoList)
-		);
-	}
-
-	private Collector<BillingData, ?, Map<String, Map<String, List<BillingData>>>> groupByUserNameExploratoryNameCollector() {
-		return Collectors.groupingBy(BillingData::getUser, Collectors.groupingBy(BillingData::getExploratoryName));
-	}
-
-	private boolean userAndExploratoryNamePresent(BillingData bd) {
-		return Objects.nonNull(bd.getUser()) && Objects.nonNull(bd.getExploratoryName());
-	}
-
-	private void updateExploratoryBillingData(String user, String exploratoryName, List<BillingData> billingInfoList) {
-		userInstanceRepository.findByUserAndExploratoryName(user, exploratoryName).ifPresent(userInstance ->
-				mongoTemplate.updateFirst(Query.query(where("user").is(user).and("exploratory_name").is(exploratoryName)),
-						Update.update("cost", getTotalCost(billingInfoList) + "$").set("billing", billingInfoList),
-						UserInstance.class));
-	}
-
-	private double getTotalCost(List<BillingData> billingInfoList) {
-		return new BigDecimal(billingInfoList.stream().mapToDouble(BillingData::getCost).sum())
-				.setScale(2, BigDecimal.ROUND_HALF_UP)
-				.doubleValue();
-
-	}
-
-	private BillingData toBillingData(GcpBillingData bd, BillingData billableResource) {
-
-		return BillingData.builder()
-				.displayName(billableResource.getDisplayName())
-				.cost(bd.getCost().setScale(2, BigDecimal.ROUND_HALF_UP).doubleValue())
-				.currency(bd.getCurrency())
-				.product(bd.getProduct())
-				.project(billableResource.getProject())
-				.usageDateTo(bd.getUsageDateTo())
-				.usageDateFrom(bd.getUsageDateFrom())
-				.usageDate(bd.getUsageDateFrom().format((DateTimeFormatter.ofPattern(DATE_FORMAT))))
-				.usageType(bd.getUsageType())
-				.user(billableResource.getUser())
-				.exploratoryName(billableResource.getExploratoryName())
-				.computationalName(billableResource.getComputationalName())
-				.dlabId(bd.getTag())
-				.resourceType(billableResource.getResourceType())
-				.build();
-	}
-}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/impl/BillingServiceImpl.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/impl/BillingServiceImpl.java
new file mode 100644
index 0000000..5661dfb
--- /dev/null
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/service/impl/BillingServiceImpl.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.billing.gcp.service.impl;
+
+import com.epam.dlab.billing.gcp.dao.BillingDAO;
+import com.epam.dlab.billing.gcp.service.BillingService;
+import com.epam.dlab.dto.billing.BillingData;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.util.Collections;
+import java.util.List;
+
+@Service
+@Slf4j
+public class BillingServiceImpl implements BillingService {
+
+	private final BillingDAO billingDAO;
+
+	@Autowired
+	public BillingServiceImpl(BillingDAO billingDAO) {
+		this.billingDAO = billingDAO;
+	}
+
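+	// Failures are logged and an empty list is returned so the REST endpoint
+	// degrades gracefully instead of propagating the error.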
+	@Override
+	public List<BillingData> getBillingData() {
+		try {
+			return billingDAO.getBillingData();
+		} catch (Exception e) {
+			log.error("Cannot retrieve billing data due to: {}", e.getMessage(), e);
+			return Collections.emptyList();
+		}
+	}
+}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/util/BillingUtils.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/util/BillingUtils.java
deleted file mode 100644
index e049393..0000000
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/util/BillingUtils.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.gcp.util;
-
-import com.epam.dlab.billing.gcp.documents.UserInstance;
-import com.epam.dlab.billing.gcp.model.BillingData;
-
-import java.util.stream.Stream;
-
-public class BillingUtils {
-
-	private static final String EDGE_FORMAT = "%s-%s-%s-edge";
-	private static final String EDGE_VOLUME_FORMAT = "%s-%s-%s-edge-volume-primary";
-	private static final String EDGE_BUCKET_FORMAT = "%s-%s-bucket";
-	private static final String VOLUME_PRIMARY_FORMAT = "%s-volume-primary";
-	private static final String VOLUME_SECONDARY_FORMAT = "%s-volume-secondary";
-	private static final String VOLUME_PRIMARY = "Volume primary";
-	private static final String VOLUME_SECONDARY = "Volume secondary";
-	private static final String SHARED_RESOURCE = "Shared resource";
-
-	public static Stream<BillingData> edgeBillingDataStream(String project, String sbn, String endpoint) {
-		final String userEdgeId = String.format(EDGE_FORMAT, sbn, project, endpoint);
-		final String edgeVolumeId = String.format(EDGE_VOLUME_FORMAT, sbn, project, endpoint);
-		final String edgeBucketId = String.format(EDGE_BUCKET_FORMAT, sbn, project);
-		return Stream.of(
-				BillingData.builder().displayName("EDGE node").user(SHARED_RESOURCE).project(project).dlabId(userEdgeId).resourceType(BillingData.ResourceType.EDGE).build(),
-				BillingData.builder().displayName("EDGE volume").user(SHARED_RESOURCE).project(project).dlabId(edgeVolumeId).resourceType(BillingData.ResourceType.VOLUME).build(),
-				BillingData.builder().displayName("EDGE bucket").user(SHARED_RESOURCE).project(project).dlabId(edgeBucketId).resourceType(BillingData.ResourceType.EDGE_BUCKET).build()
-		);
-	}
-
-	public static Stream<BillingData> ssnBillingDataStream(String sbn) {
-		final String ssnId = sbn + "-ssn";
-		final String bucketName = sbn.replaceAll("_", "-");
-		return Stream.of(
-				BillingData.builder().user(SHARED_RESOURCE).displayName("SSN").dlabId(ssnId).resourceType(BillingData.ResourceType.SSN).build(),
-				BillingData.builder().user(SHARED_RESOURCE).displayName("SSN Volume").dlabId(String.format(VOLUME_PRIMARY_FORMAT, ssnId)).resourceType(BillingData.ResourceType.VOLUME).build(),
-				BillingData.builder().user(SHARED_RESOURCE).displayName("SSN bucket").dlabId(bucketName + "-ssn" +
-						"-bucket").resourceType(BillingData.ResourceType.SSN_BUCKET).build(),
-				BillingData.builder().user(SHARED_RESOURCE).displayName("Collaboration bucket").dlabId(bucketName +
-						"-shared-bucket").resourceType(BillingData.ResourceType.SHARED_BUCKET).build()
-		);
-	}
-
-	public static Stream<BillingData> exploratoryBillingDataStream(UserInstance userInstance) {
-		final Stream<BillingData> computationalStream = userInstance.getComputationalResources()
-				.stream()
-				.filter(cr -> cr.getComputationalId() != null)
-				.flatMap(cr -> Stream.of(computationalBillableResource(userInstance, cr),
-						withExploratoryName(userInstance).displayName(cr.getComputationalName() + ":" + VOLUME_PRIMARY).dlabId(String.format(VOLUME_PRIMARY_FORMAT, cr.getComputationalId()))
-								.resourceType(BillingData.ResourceType.VOLUME).computationalName(cr.getComputationalName()).build()));
-		final String exploratoryId = userInstance.getExploratoryId();
-		final String primaryVolumeId = String.format(VOLUME_PRIMARY_FORMAT, exploratoryId);
-		final String secondaryVolumeId = String.format(VOLUME_SECONDARY_FORMAT, exploratoryId);
-		final Stream<BillingData> exploratoryStream = Stream.of(
-				withExploratoryName(userInstance).displayName(userInstance.getExploratoryName()).dlabId(exploratoryId).resourceType(BillingData.ResourceType.EXPLORATORY).build(),
-				withExploratoryName(userInstance).displayName(VOLUME_PRIMARY).dlabId(primaryVolumeId).resourceType(BillingData.ResourceType.VOLUME).build(),
-				withExploratoryName(userInstance).displayName(VOLUME_SECONDARY).dlabId(secondaryVolumeId).resourceType(BillingData.ResourceType.VOLUME).build());
-		return Stream.concat(computationalStream, exploratoryStream);
-	}
-
-	private static BillingData computationalBillableResource(UserInstance userInstance,
-															 UserInstance.ComputationalResource cr) {
-		return withExploratoryName(userInstance)
-				.dlabId(cr.getComputationalId())
-				.displayName(cr.getComputationalName())
-				.resourceType(BillingData.ResourceType.COMPUTATIONAL)
-				.computationalName(cr.getComputationalName())
-				.project(userInstance.getProject())
-				.build();
-	}
-
-	private static BillingData.BillingDataBuilder withExploratoryName(UserInstance userInstance) {
-		return BillingData.builder().user(userInstance.getUser()).exploratoryName(userInstance.getExploratoryName())
-				.project(userInstance.getProject());
-	}
-
-}
diff --git a/services/billing-gcp/src/main/resources/application.yml b/services/billing-gcp/src/main/resources/application.yml
index 3761be3..45bab37 100644
--- a/services/billing-gcp/src/main/resources/application.yml
+++ b/services/billing-gcp/src/main/resources/application.yml
@@ -1,4 +1,4 @@
-# ******************************************************************************
+# *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -20,20 +20,40 @@
 # ******************************************************************************
 
 spring:
+  main:
+    allow-bean-definition-overriding: true
   data:
     mongodb:
       username: admin
       password: admin
-      database: <MONGO_PASSWORD>
+      database: dlabdb
       port: 27017
       host: localhost
 dlab:
   sbn: <CONF_SERVICE_BASE_NAME>
   bigQueryDataset: <DATASET_NAME>
-  cron: 0 * * * * *
+  cron: 0 0 * * * *
+
+server:
+  port: 8088
+  servlet:
+    contextPath: /api/billing
+
+server.ssl.key-store-type: JKS
+server.ssl.key-store: <KEY_STORE_PATH>
+server.ssl.key-store-password: KEYSTORE_PASSWORD
+server.ssl.key-alias: billing
 
 logging:
   file: /var/opt/dlab/log/ssn/billing.log
   level:
     com:
-      epam: trace
\ No newline at end of file
+      epam: trace
+
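+# Keycloak adapter settings. bearer-only means this service only validates
+# incoming tokens and never initiates a login flow itself.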
+keycloak:
+  bearer-only: true
+  realm: <KEYCLOAK_REALM_NAME>
+  resource: <KEYCLOAK_CLIENT_ID>
+  credentials.secret: <KEYCLOAK_CLIENT_SECRET>
+  ssl-required: none
+  auth-server-url: <KEYCLOAK_AUTH_SERVER_URL>
\ No newline at end of file
diff --git a/services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java b/services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java
deleted file mode 100644
index 2b8164c..0000000
--- a/services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.billing.gcp.service;
-
-import com.epam.dlab.billing.gcp.dao.BillingDAO;
-import com.epam.dlab.billing.gcp.documents.UserInstance;
-import com.epam.dlab.billing.gcp.model.GcpBillingData;
-import com.epam.dlab.billing.gcp.repository.BillingRepository;
-import com.epam.dlab.billing.gcp.repository.ProjectRepository;
-import com.epam.dlab.billing.gcp.repository.UserInstanceRepository;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-import org.springframework.data.mongodb.core.MongoTemplate;
-import org.springframework.test.util.ReflectionTestUtils;
-
-import java.math.BigDecimal;
-import java.time.LocalDate;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import static org.mockito.Mockito.*;
-
-
-@RunWith(MockitoJUnitRunner.class)
-public class BillingServiceImplTest {
-    @Mock
-    private BillingDAO billingDAO;
-    @Mock
-    private ProjectRepository projectRepository;
-    @Mock
-    private UserInstanceRepository userInstanceRepository;
-    @Mock
-    private BillingRepository billingRepository;
-    @Mock
-    private MongoTemplate mongoTemplate;
-    @InjectMocks
-    private BillingServiceImpl billingService;
-
-    @Before
-    public void setUp() {
-        ReflectionTestUtils.setField(billingService, "sbn", "CONF_SERVICE_BASE_NAME");
-    }
-
-    @Test
-    public void updateBillingData() throws InterruptedException {
-        when(userInstanceRepository.findAll()).thenReturn(getUserInstances());
-        when(billingDAO.getBillingData()).thenReturn(getBillingData());
-
-        billingService.updateBillingData();
-
-        verify(userInstanceRepository).findAll();
-        verify(userInstanceRepository, times(1)).findAll();
-        verify(billingDAO).getBillingData();
-        verify(billingDAO, times(1)).getBillingData();
-        verify(projectRepository, times(1)).findAll();
-        verify(billingRepository, times(1)).deleteByUsageDateRegex(anyString());
-        verify(billingRepository, times(1)).insert(anyCollection());
-
-        verifyNoMoreInteractions(billingDAO, userInstanceRepository, projectRepository);
-    }
-
-    private List<UserInstance> getUserInstances() {
-        UserInstance userInstance1 = new UserInstance();
-        userInstance1.setComputationalResources(Collections.emptyList());
-
-        UserInstance userInstance2 = new UserInstance();
-        userInstance2.setComputationalResources(Collections.emptyList());
-        userInstance2.setExploratoryId("exploratoryIId");
-
-        return Arrays.asList(userInstance1, userInstance1, userInstance2);
-    }
-
-    private List<GcpBillingData> getBillingData() {
-        return Collections.singletonList(GcpBillingData.builder()
-                .usageDateFrom(LocalDate.MIN)
-                .usageDateTo(LocalDate.MAX)
-                .product("product")
-                .usageType("usageType")
-                .cost(new BigDecimal(1))
-                .currency("USD")
-                .tag("exploratoryId")
-                .build());
-    }
-}
\ No newline at end of file
diff --git a/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java b/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java
index 54a590e..dfec0dc 100644
--- a/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java
+++ b/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java
@@ -19,10 +19,6 @@
 
 package com.epam.dlab.billing;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
 public enum DlabResourceType {
 	SSN,
 	SSN_BUCKET,
@@ -51,88 +47,6 @@
 		return null;
 	}
 
-	public static String getResourceTypeName(String id) {
-		DlabResourceType resourceTypeId = DlabResourceType.of(id);
-		if (resourceTypeId != null) {
-			switch (resourceTypeId) {
-				case COMPUTATIONAL:
-					return "Cluster";
-				case EXPLORATORY:
-					return "Notebook";
-				case EDGE:
-					return "Edge Node";
-				case VOLUME:
-					return "Volume";
-				case EDGE_BUCKET:
-				case SSN_BUCKET:
-				case COLLABORATION_BUCKET:
-					return "Bucket";
-				case EDGE_CONTAINER:
-				case SSN_CONTAINER:
-				case COLLABORATION_CONTAINER:
-					return "Container";
-				case SSN_STORAGE_ACCOUNT:
-				case EDGE_STORAGE_ACCOUNT:
-				case COLLABORATION_STORAGE_ACCOUNT:
-					return "Storage Account";
-				case SSN:
-					return "SSN";
-				case DATA_LAKE_STORE:
-					return "Data Lake Store Account";
-			}
-		}
-		return id;
-	}
-
-	public static List<String> getResourceTypeIds(List<String> names) {
-		if (names == null || names.isEmpty()) {
-			return Collections.emptyList();
-		}
-
-		List<String> list = new ArrayList<>();
-		names.forEach(e -> {
-			switch (e) {
-				case "Cluster":
-					list.add(DlabResourceType.COMPUTATIONAL.toString());
-					break;
-				case "Notebook":
-					list.add(DlabResourceType.EXPLORATORY.toString());
-					break;
-				case "Edge Node":
-					list.add(DlabResourceType.EDGE.toString());
-					break;
-				case "Bucket":
-					list.add(DlabResourceType.EDGE_BUCKET.toString());
-					list.add(DlabResourceType.SSN_BUCKET.toString());
-					list.add(DlabResourceType.COLLABORATION_BUCKET.toString());
-					break;
-				case "Container":
-					list.add(DlabResourceType.EDGE_CONTAINER.toString());
-					list.add(DlabResourceType.SSN_CONTAINER.toString());
-					list.add(DlabResourceType.COLLABORATION_CONTAINER.toString());
-					break;
-				case "SSN":
-					list.add(DlabResourceType.SSN.toString());
-					break;
-				case "Storage Account":
-					list.add(DlabResourceType.SSN_STORAGE_ACCOUNT.toString());
-					list.add(DlabResourceType.EDGE_STORAGE_ACCOUNT.toString());
-					list.add(DlabResourceType.COLLABORATION_STORAGE_ACCOUNT.toString());
-					break;
-				case "Data Lake Store Account":
-					list.add(DlabResourceType.DATA_LAKE_STORE.toString());
-					break;
-				case "Volume":
-					list.add(DlabResourceType.VOLUME.toString());
-					break;
-				default:
-					list.add(e);
-			}
-		});
-
-		return list;
-	}
-
 	@Override
 	public String toString() {
 		return super.toString().toUpperCase();
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
index 65fb838..16d36be 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
@@ -30,6 +30,7 @@
     private String instanceId;
     @JsonProperty("exploratory_name")
     private String exploratoryName;
+    private String project;
     @JsonProperty("exploratory_id")
     private String exploratoryId;
     @JsonProperty("exploratory_template_name")
@@ -61,6 +62,19 @@
         return self;
     }
 
+    public String getProject() {
+        return project;
+    }
+
+    public void setProject(String project) {
+        this.project = project;
+    }
+
+    public T withProject(String project) {
+        setProject(project);
+        return self;
+    }
+
     public String getExploratoryId() {
         return exploratoryId;
     }
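
Because withProject() returns the self-typed generic T, it chains with subclass-specific setters without casts. A minimal sketch, assuming the ExploratoryStatusDTO subclass used by the callback handlers later in this patch and its default constructor (names and values are illustrative):

    // Sketch: fluent chaining across base- and subclass setters (hypothetical values).
    ExploratoryStatusDTO statusDTO = new ExploratoryStatusDTO()
            .withExploratoryName("jupyter-nb")   // hypothetical notebook name
            .withProject("analytics");           // field introduced in this change
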
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/UserInstanceDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/UserInstanceDTO.java
index 5dae5ef..1950312 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/UserInstanceDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/UserInstanceDTO.java
@@ -28,7 +28,11 @@
 import lombok.Data;
 
 import java.time.LocalDateTime;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Stores info about the user notebook.
@@ -52,6 +56,8 @@
 	private String project;
 	@JsonProperty("endpoint")
 	private String endpoint;
+	@JsonProperty("cloud_provider")
+	private String cloudProvider;
 	@JsonProperty("template_name")
 	private String templateName;
 	@JsonProperty
@@ -174,6 +180,11 @@
 		return this;
 	}
 
+	public UserInstanceDTO withCloudProvider(String cloudProvider) {
+		setCloudProvider(cloudProvider);
+		return this;
+	}
+
 	public UserInstanceDTO withTags(Map<String, String> tags) {
 		setTags(tags);
 		return this;
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/AwsCloudSettings.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/AwsCloudSettings.java
index c27e1b2..1903f30 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/AwsCloudSettings.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/AwsCloudSettings.java
@@ -70,8 +70,24 @@
 	protected String confKeyDir;
 	@JsonProperty("conf_image_enabled")
 	private String imageEnabled;
-	@JsonProperty("conf_shared_image_enabled")
-	private String sharedImageEnabled;
+	@JsonProperty("conf_stepcerts_enabled")
+	private String stepCertsEnabled;
+	@JsonProperty("conf_stepcerts_root_ca")
+	private String stepCertsRootCA;
+	@JsonProperty("conf_stepcerts_kid")
+	private String stepCertsKid;
+	@JsonProperty("conf_stepcerts_kid_password")
+	private String stepCertsKidPassword;
+	@JsonProperty("conf_stepcerts_ca_url")
+	private String stepCertsCAURL;
+	@JsonProperty("keycloak_auth_server_url")
+	private String keycloakAuthServerUrl;
+	@JsonProperty("keycloak_realm_name")
+	private String keycloakRealmName;
+	@JsonProperty("keycloak_user")
+	private String keycloakUser;
+	@JsonProperty("keycloak_user_password")
+	private String keycloakUserPassword;
 
 	@Override
 	@JsonIgnore
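
conf_shared_image_enabled is dropped from the global cloud settings and, as the DTO changes below show, reappears on the per-request create DTOs; the new step-certificates and Keycloak fields serialize under the snake_case keys declared in @JsonProperty. A sketch of the resulting JSON, assuming Jackson's ObjectMapper and the builder that CommandBuilder uses later in this patch (all values illustrative):

    // Sketch: the @JsonProperty names become the JSON keys.
    AwsCloudSettings settings = AwsCloudSettings.builder()
            .stepCertsEnabled("true")
            .stepCertsCAURL("https://step-ca.example.com:9000") // hypothetical CA endpoint
            .keycloakRealmName("dlab")
            .build();
    new ObjectMapper().writeValueAsString(settings); // throws JsonProcessingException
    // -> {"conf_stepcerts_enabled":"true","conf_stepcerts_ca_url":"https://step-ca.example.com:9000","keycloak_realm_name":"dlab", ...}
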
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/ComputationalCreateAws.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/ComputationalCreateAws.java
index 5a7abdf..77021e0 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/ComputationalCreateAws.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/ComputationalCreateAws.java
@@ -40,6 +40,8 @@
 	private String version;
 	@JsonProperty("emr_configurations")
 	private List<ClusterConfig> config;
+	@JsonProperty("conf_shared_image_enabled")
+	private String sharedImageEnabled;
 
 	public String getInstanceCount() {
 		return instanceCount;
@@ -132,6 +134,19 @@
 		return this;
 	}
 
+	public String getSharedImageEnabled() {
+		return sharedImageEnabled;
+	}
+
+	public void setSharedImageEnabled(String sharedImageEnabled) {
+		this.sharedImageEnabled = sharedImageEnabled;
+	}
+
+	public ComputationalCreateAws withSharedImageEnabled(String sharedImageEnabled) {
+		setSharedImageEnabled(sharedImageEnabled);
+		return this;
+	}
+
 	@Override
 	public ToStringHelper toStringHelper(Object self) {
 		return super.toStringHelper(self)
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/SparkComputationalCreateAws.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/SparkComputationalCreateAws.java
index d82628d..539937e 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/SparkComputationalCreateAws.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/aws/computational/SparkComputationalCreateAws.java
@@ -35,6 +35,8 @@
 	private String dataEngineMasterShape;
 	@JsonProperty("spark_configurations")
 	private List<ClusterConfig> config;
+	@JsonProperty("conf_shared_image_enabled")
+	private String sharedImageEnabled;
 
 	public SparkComputationalCreateAws withDataEngineInstanceCount(String dataEngineInstanceCount) {
 		this.dataEngineInstanceCount = dataEngineInstanceCount;
@@ -56,6 +58,11 @@
 		return this;
 	}
 
+	public SparkComputationalCreateAws withSharedImageEnabled(String sharedImageEnabled) {
+		this.sharedImageEnabled = sharedImageEnabled;
+		return this;
+	}
+
 	@Override
 	public MoreObjects.ToStringHelper toStringHelper(Object self) {
 		return super.toStringHelper(self)
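
With the flag removed from AwsCloudSettings, each create request now carries conf_shared_image_enabled itself. A sketch, assuming the DTO's default constructor (values illustrative):

    // Sketch: the shared-image flag travels with the create request, not global settings.
    SparkComputationalCreateAws createAws = new SparkComputationalCreateAws()
            .withDataEngineInstanceCount("2")
            .withSharedImageEnabled("true");
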
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/AzureCloudSettings.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/AzureCloudSettings.java
index 19e4f56..07013c6 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/AzureCloudSettings.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/AzureCloudSettings.java
@@ -36,6 +36,12 @@
     private String azureRegion;
     @JsonProperty("azure_iam_user")
     private String azureIamUser;
+    @JsonProperty("conf_service_base_name")
+    protected String sbn;
+    @JsonProperty("conf_os_family")
+    protected String os;
+    @JsonProperty("conf_cloud_provider")
+    protected String cloud;
     @JsonProperty("azure_vpc_name")
     private String azureVpcName;
     @JsonProperty("azure_subnet_name")
@@ -44,12 +50,38 @@
     private String azureResourceGroupName;
     @JsonProperty("azure_security_group_name")
     private String azureSecurityGroupName;
+    @JsonProperty("ldap_hostname")
+    protected String ldapHost;
+    @JsonProperty("ldap_dn")
+    protected String ldapDn;
+    @JsonProperty("ldap_ou")
+    protected String ldapOu;
+    @JsonProperty("ldap_service_username")
+    protected String ldapUser;
+    @JsonProperty("ldap_service_password")
+    protected String ldapPassword;
     @JsonProperty("conf_key_dir")
     protected String confKeyDir;
     @JsonProperty("conf_image_enabled")
     private String imageEnabled;
-    @JsonProperty("conf_shared_image_enabled")
-    private String sharedImageEnabled;
+    @JsonProperty("conf_stepcerts_enabled")
+    private String stepCertsEnabled;
+    @JsonProperty("conf_stepcerts_root_ca")
+    private String stepCertsRootCA;
+    @JsonProperty("conf_stepcerts_kid")
+    private String stepCertsKid;
+    @JsonProperty("conf_stepcerts_kid_password")
+    private String stepCertsKidPassword;
+    @JsonProperty("conf_stepcerts_ca_url")
+    private String stepCertsCAURL;
+    @JsonProperty("keycloak_auth_server_url")
+    private String keycloakAuthServerUrl;
+    @JsonProperty("keycloak_realm_name")
+    private String keycloakRealmName;
+    @JsonProperty("keycloak_user")
+    private String keycloakUser;
+    @JsonProperty("keycloak_user_password")
+    private String keycloakUserPassword;
 
     @Override
     @JsonIgnore
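
AzureCloudSettings now carries the conf_service_base_name/conf_os_family/conf_cloud_provider and LDAP fields that the AWS and GCP settings already had, which lets CommandBuilder (below) populate all three providers uniformly. A sketch with illustrative values, using the builder already exercised by CommandBuilder:

    // Sketch: Azure settings now accept the same LDAP block as AWS/GCP (values hypothetical).
    AzureCloudSettings azure = AzureCloudSettings.builder()
            .sbn("dlab")
            .ldapHost("ldap.example.com")
            .ldapDn("dc=example,dc=com")
            .build();
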
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/computational/SparkComputationalCreateAzure.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/computational/SparkComputationalCreateAzure.java
index 1bc79fc..5902906 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/computational/SparkComputationalCreateAzure.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/azure/computational/SparkComputationalCreateAzure.java
@@ -39,6 +39,8 @@
 	private String azureUserRefreshToken;
 	@JsonProperty("spark_configurations")
 	private List<ClusterConfig> config;
+	@JsonProperty("conf_shared_image_enabled")
+	private String sharedImageEnabled;
 
 	public SparkComputationalCreateAzure withDataEngineInstanceCount(String dataEngineInstanceCount) {
 		this.dataEngineInstanceCount = dataEngineInstanceCount;
@@ -70,6 +72,11 @@
 		return this;
 	}
 
+	public SparkComputationalCreateAzure withSharedImageEnabled(String sharedImageEnabled) {
+		this.sharedImageEnabled = sharedImageEnabled;
+		return this;
+	}
+
 	@Override
 	public MoreObjects.ToStringHelper toStringHelper(Object self) {
 		return super.toStringHelper(self)
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/base/project/ProjectResult.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/base/project/ProjectResult.java
index 9134f4a..0c88022 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/base/project/ProjectResult.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/base/project/ProjectResult.java
@@ -28,10 +28,10 @@
 @Data
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class ProjectResult extends StatusBaseDTO<ProjectResult> {
-	private EdgeInfo edgeInfo;
-	@JsonProperty("project_name")
-	private String projectName;
-	@JsonProperty("endpoint_name")
-	private String endpointName;
+    private EdgeInfo edgeInfo;
+    @JsonProperty("project_name")
+    private String projectName;
+    @JsonProperty("endpoint_name")
+    private String endpointName;
 
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/azure/AzureBillingFilter.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingData.java
similarity index 68%
rename from services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/azure/AzureBillingFilter.java
rename to services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingData.java
index cabb762..c95a02e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/azure/AzureBillingFilter.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingData.java
@@ -17,25 +17,28 @@
  * under the License.
  */
 
-package com.epam.dlab.backendapi.resources.dto.azure;
+package com.epam.dlab.dto.billing;
 
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Builder;
 import lombok.Data;
 
-import java.util.Collections;
-import java.util.List;
+import java.time.LocalDate;
 
 @Data
+@Builder
 @JsonIgnoreProperties(ignoreUnknown = true)
-public class AzureBillingFilter extends BillingFilter {
-    @JsonProperty("size")
-    private List<String> nodeSize;
-    private List<String> category = Collections.emptyList();
-
-    @Override
-    public List<String> getShapes() {
-        return nodeSize;
-    }
+public class BillingData {
+    private final String tag;
+    private String application;
+    @JsonProperty("from")
+    private LocalDate usageDateFrom;
+    @JsonProperty("to")
+    private LocalDate usageDateTo;
+    private String product;
+    private String usageType;
+    private Double cost;
+    private String currency;
+    private final String usageDate;
 }
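
BillingData generalizes the former Azure-specific filter into a cloud-agnostic billing record with a Lombok builder. A construction sketch, assuming java.time.LocalDate and illustrative values (note cost is a Double here, unlike the BigDecimal used by GcpBillingData in the deleted test above):

    // Sketch: building a billing record (all values hypothetical).
    BillingData entry = BillingData.builder()
            .tag("dlab-id-value")
            .usageDateFrom(LocalDate.of(2020, 1, 1))
            .usageDateTo(LocalDate.of(2020, 1, 31))
            .product("Compute Engine")
            .usageType("instance-hours")
            .cost(12.34)
            .currency("USD")
            .usageDate("2020-01")
            .build();
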
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingResourceType.java
similarity index 81%
copy from integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
copy to services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingResourceType.java
index 1e49a60..7ad1082 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingResourceType.java
@@ -17,7 +17,15 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.docker;
+package com.epam.dlab.dto.billing;
 
-class Labels {
+public enum BillingResourceType {
+    EDGE,
+    SSN,
+    ENDPOINT,
+    BUCKET,
+    VOLUME,
+    EXPLORATORY,
+    COMPUTATIONAL,
+    IMAGE
 }
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/computational/UserComputationalResource.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/computational/UserComputationalResource.java
index 678025f..9f8c021 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/computational/UserComputationalResource.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/computational/UserComputationalResource.java
@@ -59,17 +59,21 @@
 	private LocalDateTime lastActivity;
 	@JsonProperty("master_node_shape")
 	private String masterNodeShape;
+	@JsonProperty("slave_node_shape")
+	private String slaveNodeShape;
 	@JsonProperty("dataengine_instance_shape")
 	private String dataengineShape;
+	@JsonProperty("dataengine_instance_count")
+	private int dataengineInstanceCount;
 	@JsonProperty("instance_id")
 	private String instanceId;
 	protected List<ClusterConfig> config;
-	private Map<String,String> tags;
+	private Map<String, String> tags;
 
 	public UserComputationalResource(String computationalName, String computationalId, String imageName,
 									 String templateName, String status, Date uptime, SchedulerJobDTO schedulerData,
 									 boolean reuploadKeyRequired, List<ResourceURL> resourceUrl,
-									 LocalDateTime lastActivity, Map<String,String> tags) {
+									 LocalDateTime lastActivity, Map<String, String> tags) {
 		this.computationalName = computationalName;
 		this.computationalId = computationalId;
 		this.imageName = imageName;
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryCreateDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryCreateDTO.java
index 7f87951..cb32efb 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryCreateDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryCreateDTO.java
@@ -41,6 +41,8 @@
 	private Map<String, String> tags;
 	@JsonProperty("endpoint_name")
 	private String endpoint;
+	@JsonProperty("conf_shared_image_enabled")
+	private String sharedImageEnabled;
 
 	/**
 	 * Return the list of GIT credentials.
@@ -82,6 +84,11 @@
 		return self;
 	}
 
+	public T withSharedImageEnabled(String sharedImageEnabled) {
+		this.sharedImageEnabled = sharedImageEnabled;
+		return self;
+	}
+
 	public String getImageName() {
 		return imageName;
 	}
@@ -103,6 +110,14 @@
 		this.endpoint = endpoint;
 	}
 
+	public String getSharedImageEnabled() {
+		return sharedImageEnabled;
+	}
+
+	public void setSharedImageEnabled(String sharedImageEnabled) {
+		this.sharedImageEnabled = sharedImageEnabled;
+	}
+
 	public List<ClusterConfig> getClusterConfig() {
 		return clusterConfig;
 	}
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java
index 0dad8e4..b41f432 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java
@@ -35,6 +35,8 @@
 	private Map<String, String> tags;
 	@JsonProperty("endpoint_name")
 	private String endpoint;
+	@JsonProperty("conf_shared_image_enabled")
+	private String sharedImageEnabled;
 
 	public ExploratoryImageDTO withImageName(String imageName) {
 		this.imageName = imageName;
@@ -51,6 +53,11 @@
 		return this;
 	}
 
+	public ExploratoryImageDTO withSharedImageEnabled(String sharedImageEnabled) {
+		this.sharedImageEnabled = sharedImageEnabled;
+		return this;
+	}
+
 	@Override
 	public MoreObjects.ToStringHelper toStringHelper(Object self) {
 		return super.toStringHelper(self)
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ImageCreateStatusDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ImageCreateStatusDTO.java
index 23da13a..44556a7 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ImageCreateStatusDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ImageCreateStatusDTO.java
@@ -33,7 +33,8 @@
 	private ImageCreateDTO imageCreateDTO;
 	private String name;
 	private String exploratoryName;
-
+	private String project;
+	private String endpoint;
 
 	public ImageCreateStatusDTO withImageCreateDto(ImageCreateDTO imageCreateDto) {
 		setImageCreateDTO(imageCreateDto);
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/GcpCloudSettings.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/GcpCloudSettings.java
index 370da91..c12c39a 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/GcpCloudSettings.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/GcpCloudSettings.java
@@ -64,8 +64,24 @@
 	protected String region;
 	@JsonProperty("conf_image_enabled")
 	private String imageEnabled;
-	@JsonProperty("conf_shared_image_enabled")
-	private String sharedImageEnabled;
+	@JsonProperty("conf_stepcerts_enabled")
+	private String stepCertsEnabled;
+	@JsonProperty("conf_stepcerts_root_ca")
+	private String stepCertsRootCA;
+	@JsonProperty("conf_stepcerts_kid")
+	private String stepCertsKid;
+	@JsonProperty("conf_stepcerts_kid_password")
+	private String stepCertsKidPassword;
+	@JsonProperty("conf_stepcerts_ca_url")
+	private String stepCertsCAURL;
+	@JsonProperty("keycloak_auth_server_url")
+	private String keycloakAuthServerUrl;
+	@JsonProperty("keycloak_realm_name")
+	private String keycloakRealmName;
+	@JsonProperty("keycloak_user")
+	private String keycloakUser;
+	@JsonProperty("keycloak_user_password")
+	private String keycloakUserPassword;
 
 	@Override
 	@JsonIgnore
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/ComputationalCreateGcp.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/ComputationalCreateGcp.java
index c65d2ff..1ab5698 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/ComputationalCreateGcp.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/ComputationalCreateGcp.java
@@ -36,6 +36,8 @@
     private String preemptibleCount;
     @JsonProperty("dataproc_version")
     private String version;
+    @JsonProperty("conf_shared_image_enabled")
+    private String sharedImageEnabled;
 
     public ComputationalCreateGcp withMasterInstanceCount(String masterInstanceCount) {
         this.masterInstanceCount = masterInstanceCount;
@@ -67,6 +69,19 @@
         return this;
     }
 
+    public String getSharedImageEnabled() {
+        return sharedImageEnabled;
+    }
+
+    public void setSharedImageEnabled(String sharedImageEnabled) {
+        this.sharedImageEnabled = sharedImageEnabled;
+    }
+
+    public ComputationalCreateGcp withSharedImageEnabled(String sharedImageEnabled) {
+        setSharedImageEnabled(sharedImageEnabled);
+        return this;
+    }
+
     @Override
     public MoreObjects.ToStringHelper toStringHelper(Object self) {
         return super.toStringHelper(self)
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/SparkComputationalCreateGcp.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/SparkComputationalCreateGcp.java
index 98cfb32..e21881b 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/SparkComputationalCreateGcp.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/gcp/computational/SparkComputationalCreateGcp.java
@@ -37,6 +37,8 @@
 	private String dataEngineMasterSize;
 	@JsonProperty("spark_configurations")
 	private List<ClusterConfig> config;
+	@JsonProperty("conf_shared_image_enabled")
+	private String sharedImageEnabled;
 
 	public SparkComputationalCreateGcp withDataEngineInstanceCount(String dataEngineInstanceCount) {
 		this.dataEngineInstanceCount = dataEngineInstanceCount;
@@ -58,6 +60,11 @@
 		return this;
 	}
 
+	public SparkComputationalCreateGcp withSharedImageEnabled(String sharedImageEnabled) {
+		this.sharedImageEnabled = sharedImageEnabled;
+		return this;
+	}
+
 
 	@Override
 	public MoreObjects.ToStringHelper toStringHelper(Object self) {
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectActionDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectActionDTO.java
index b8e4718..93b955e 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectActionDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectActionDTO.java
@@ -27,8 +27,8 @@
 @Data
 @AllArgsConstructor
 public class ProjectActionDTO extends ResourceBaseDTO<ProjectActionDTO> {
-	@JsonProperty("project_name")
-	private final String name;
-	@JsonProperty("endpoint_name")
-	private final String endpoint;
+    @JsonProperty("project_name")
+    private final String name;
+    @JsonProperty("endpoint_name")
+    private final String endpoint;
 }
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectCreateDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectCreateDTO.java
index 7f5aa09..47b49b2 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectCreateDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/project/ProjectCreateDTO.java
@@ -27,13 +27,11 @@
 @Data
 @Builder
 public class ProjectCreateDTO extends ResourceBaseDTO<ProjectCreateDTO> {
-	private final String key;
-	@JsonProperty("project_name")
-	private final String name;
-	@JsonProperty("project_tag")
-	private final String tag;
-	@JsonProperty("endpoint_name")
-	private final String endpoint;
-	@JsonProperty("shared_image_enabled")
-	private String useSharedImage;
+    private final String key;
+    @JsonProperty("project_name")
+    private final String name;
+    @JsonProperty("project_tag")
+    private final String tag;
+    @JsonProperty("endpoint_name")
+    private final String endpoint;
 }
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/model/aws/ReportLine.java b/services/dlab-model/src/main/java/com/epam/dlab/model/aws/ReportLine.java
index f80544d..46c82f0 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/model/aws/ReportLine.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/model/aws/ReportLine.java
@@ -33,6 +33,7 @@
 public class ReportLine {
 	public static final String FIELD_DLAB_ID = "dlab_id";
 	public static final String FIELD_USER_ID = "user";
+	public static final String FIELD_PROJECT = "project";
 	public static final String FIELD_USAGE_DATE = "usage_date";
 	public static final String FIELD_PRODUCT = "product";
 	public static final String FIELD_USAGE_TYPE = "usage_type";
diff --git a/services/dlab-webapp-common/src/main/java/com/epam/dlab/ServiceConfiguration.java b/services/dlab-webapp-common/src/main/java/com/epam/dlab/ServiceConfiguration.java
index 85d4318..3297abd 100644
--- a/services/dlab-webapp-common/src/main/java/com/epam/dlab/ServiceConfiguration.java
+++ b/services/dlab-webapp-common/src/main/java/com/epam/dlab/ServiceConfiguration.java
@@ -54,6 +54,11 @@
 
     @Valid
     @NotNull
+    @JsonProperty(ServiceConsts.BILLING_SERVICE_NAME)
+    private RESTServiceFactory billingFactory = new RESTServiceFactory();
+
+    @Valid
+    @NotNull
     @JsonProperty(ServiceConsts.SECURITY_SERVICE_NAME)
     private RESTServiceFactory securityFactory;
 
@@ -85,6 +90,10 @@
         return provisioningFactory;
     }
 
+    public RESTServiceFactory getBillingFactory() {
+        return billingFactory;
+    }
+
     public RESTServiceFactory getSecurityFactory() {
         return securityFactory;
     }
diff --git a/services/dlab-webapp-common/src/main/java/com/epam/dlab/constants/ServiceConsts.java b/services/dlab-webapp-common/src/main/java/com/epam/dlab/constants/ServiceConsts.java
index d376665..e1bcf23 100644
--- a/services/dlab-webapp-common/src/main/java/com/epam/dlab/constants/ServiceConsts.java
+++ b/services/dlab-webapp-common/src/main/java/com/epam/dlab/constants/ServiceConsts.java
@@ -20,13 +20,14 @@
 package com.epam.dlab.constants;
 
 public final class ServiceConsts {
-	public static final String MONGO_NAME = "mongo";
-	public static final String PROVISIONING_SERVICE_NAME = "provisioningService";
-	public static final String MAVEN_SEARCH_API = "mavenSearchService";
-	public static final String SECURITY_SERVICE_NAME = "securityService";
-	public static final String SELF_SERVICE_NAME = "selfService";
-	public static final String PROVISIONING_USER_AGENT = "provisioning-service";
+    public static final String MONGO_NAME = "mongo";
+    public static final String PROVISIONING_SERVICE_NAME = "provisioningService";
+    public static final String BILLING_SERVICE_NAME = "billingService";
+    public static final String MAVEN_SEARCH_API = "mavenSearchService";
+    public static final String SECURITY_SERVICE_NAME = "securityService";
+    public static final String SELF_SERVICE_NAME = "selfService";
+    public static final String PROVISIONING_USER_AGENT = "provisioning-service";
 
-	private ServiceConsts() {
-	}
+    private ServiceConsts() {
+    }
 }
diff --git a/services/dlab-webapp-common/src/main/java/com/epam/dlab/rest/client/RESTService.java b/services/dlab-webapp-common/src/main/java/com/epam/dlab/rest/client/RESTService.java
index 5008bcd..5b4dbba 100644
--- a/services/dlab-webapp-common/src/main/java/com/epam/dlab/rest/client/RESTService.java
+++ b/services/dlab-webapp-common/src/main/java/com/epam/dlab/rest/client/RESTService.java
@@ -25,6 +25,7 @@
 import javax.ws.rs.client.Entity;
 import javax.ws.rs.client.Invocation;
 import javax.ws.rs.client.WebTarget;
+import javax.ws.rs.core.GenericType;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import java.net.URI;
@@ -52,9 +53,7 @@
 	}
 
 	public <T> T get(String path, Class<T> clazz) {
-		Invocation.Builder builder = getBuilder(path);
-		log.debug("REST get {}", path);
-		return builder.get(clazz);
+		return get(path, null, clazz);
 	}
 
 	public <T> T get(URI path, Class<T> clazz) {
@@ -64,22 +63,32 @@
 				.get(clazz);
 	}
 
-	public <T> T post(String path, Object parameter, Class<T> clazz) {
-		Invocation.Builder builder = getBuilder(path);
-		log.debug("REST post {}", path);
-		return builder.post(Entity.json(parameter), clazz);
-	}
-
 	public <T> T get(String path, String accessToken, Class<T> clazz) {
 		Invocation.Builder builder = getBuilder(path, accessToken, Collections.emptyMap());
 		log.debug("REST get secured {} {}", path, accessToken);
 		return builder.get(clazz);
 	}
 
+	public <T> T get(String path, GenericType<T> genericType) {
+		return get(path, null, genericType);
+	}
+
+	public <T> T get(String path, String accessToken, GenericType<T> genericType) {
+		return get(path, accessToken, genericType, Collections.emptyMap());
+	}
+
+	public <T> T get(String path, String accessToken, GenericType<T> genericType, Map<String, Object> queryParams) {
+		Invocation.Builder builder = getBuilder(path, accessToken, queryParams);
+		log.debug("REST get secured {} {}", path, accessToken);
+		return builder.get(genericType);
+	}
+
+	public <T> T post(String path, Object parameter, Class<T> clazz) {
+		return post(path, null, parameter, clazz);
+	}
+
 	public <T> T post(String path, String accessToken, Object parameter, Class<T> clazz) {
-		Invocation.Builder builder = getBuilder(path, accessToken, Collections.emptyMap());
-		log.debug("REST post secured {} {}", path, accessToken);
-		return builder.post(Entity.json(parameter), clazz);
+		return post(path, accessToken, parameter, clazz, Collections.emptyMap());
 	}
 
 	public <T> T post(String path, String accessToken, Object parameter, Class<T> clazz,
@@ -89,9 +98,6 @@
 		return builder.post(Entity.json(parameter), clazz);
 	}
 
-	private Invocation.Builder getBuilder(String path) {
-		return getBuilder(path, null, Collections.emptyMap());
-	}
 
 	private Invocation.Builder getBuilder(String path, String token, Map<String, Object> queryParams) {
 		WebTarget webTarget = getWebTarget(path);
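
The new GenericType overloads let callers receive parameterized collections, which Class&lt;T&gt; cannot express because of erasure; the Class-based get/post variants now simply delegate to the token-aware ones. A usage sketch (endpoint path and token are illustrative):

    // Sketch: fetching a typed list via the new GenericType overload (hypothetical endpoint).
    List<BillingData> report = restService.get("/api/billing/report", accessToken,
            new GenericType<List<BillingData>>() {});
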
diff --git a/services/provisioning-service/provisioning.yml b/services/provisioning-service/provisioning.yml
index 96fbdf4..53c303e 100644
--- a/services/provisioning-service/provisioning.yml
+++ b/services/provisioning-service/provisioning.yml
@@ -27,7 +27,7 @@
 responseDirectory: /opt/dlab/tmp
 handlerDirectory: /opt/dlab/handlers
 dockerLogDirectory: ${LOG_ROOT_DIR}
-warmupPollTimeout: 50s
+warmupPollTimeout: 2m
 resourceStatusPollTimeout: 300m
 keyLoaderPollTimeout: 30m
 requestEnvStatusTimeout: 50s
@@ -58,7 +58,7 @@
 #    - type: http
     - type: https
       port: 8084
-      certAlias: dlab
+      certAlias: ssn
       validateCerts: false
       keyStorePath: ${KEY_STORE_PATH}
       keyStorePassword: ${KEY_STORE_PASSWORD}
@@ -68,7 +68,7 @@
 #    - type: http
     - type: https
       port: 8085
-      certAlias: dlab
+      certAlias: ssn
       validateCerts: false
       keyStorePath: ${KEY_STORE_PATH}
       keyStorePassword: ${KEY_STORE_PASSWORD}
@@ -123,10 +123,21 @@
   azureClientId: AZURE_CLIENT_ID
   gcpProjectId: GCP_PROJECT_ID
   imageEnabled: CONF_IMAGE_ENABLED
-  sharedImageEnabled: SHARED_IMAGE_ENABLED
+  azureAuthFile: AZURE_AUTH_FILE_PATH
   ldap:
     host: LDAP_HOST
     dn: LDAP_DN
     ou: LDAP_OU
     user: LDAP_USER_NAME
-    password: LDAP_USER_PASSWORD
\ No newline at end of file
+    password: LDAP_USER_PASSWORD
+  stepCerts:
+    enabled: STEP_CERTS_ENABLED
+    rootCA: STEP_ROOT_CA
+    kid: STEP_KID_ID
+    kidPassword: STEP_KID_PASSWORD
+    caURL: STEP_CA_URL
+  keycloak:
+    auth_server_url: KEYCLOAK_AUTH_SERVER_URL
+    realm_name: KEYCLOAK_REALM_NAME
+    user: KEYCLOAK_USER_NAME
+    user_password: KEYCLOAK_PASSWORD
\ No newline at end of file
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/CloudConfiguration.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/CloudConfiguration.java
index 5653a90..9d61210 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/CloudConfiguration.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/CloudConfiguration.java
@@ -42,13 +42,15 @@
 	private final String ssnStorageAccountTagName;
 	private final String sharedStorageAccountTagName;
 	private final String datalakeTagName;
+	private final String azureAuthFile;
 	private final String azureClientId;
 	private final String peeringId;
 	private final String gcpProjectId;
 	private final boolean imageEnabled;
-	private final boolean sharedImageEnabled;
 	@JsonProperty("ldap")
 	private final LdapConfig ldapConfig;
+	private final StepCerts stepCerts;
+	private final Keycloak keycloak;
 
 	@Data
 	public static class LdapConfig {
@@ -58,4 +60,24 @@
 		private final String user;
 		private final String password;
 	}
+
+	@Data
+	public static class StepCerts {
+		private final boolean enabled;
+		private final String rootCA;
+		private final String kid;
+		private final String kidPassword;
+		private final String caURL;
+	}
+
+	@Data
+	public static class Keycloak {
+		@JsonProperty("auth_server_url")
+		private final String authServerUrl;
+		@JsonProperty("realm_name")
+		private final String realmName;
+		private final String user;
+		@JsonProperty("user_password")
+		private final String userPassword;
+	}
 }
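
Since @Data on classes with only final fields makes Lombok generate an all-args constructor, the nested blocks can also be built directly, e.g. in tests; the field and @JsonProperty names line up with the stepCerts and keycloak blocks added to provisioning.yml above. A sketch with illustrative values:

    // Sketch: constructor argument order follows field declaration order (values hypothetical).
    CloudConfiguration.StepCerts stepCerts =
            new CloudConfiguration.StepCerts(true, "base64RootCa", "kid-id", "kid-password", "https://ca.example.com");
    CloudConfiguration.Keycloak keycloak =
            new CloudConfiguration.Keycloak("https://keycloak.example.com/auth", "dlab", "admin", "secret");
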
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplication.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplication.java
index e4fd269..6f1047b 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplication.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplication.java
@@ -144,6 +144,6 @@
 		jersey.register(injector.getInstance(KeyResource.class));
 		jersey.register(injector.getInstance(CallbackHandlerResource.class));
 		jersey.register(injector.getInstance(ProjectResource.class));
-
+		jersey.register(injector.getInstance(ProvisioningHealthCheckResource.class));
 	}
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java
index 912167a..1025ad6 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java
@@ -49,7 +49,7 @@
 	private String handlerDirectory;
 
 	@JsonProperty
-	private Duration warmupPollTimeout = Duration.seconds(3);
+	private Duration warmupPollTimeout;
 
 	@JsonProperty
 	private Duration resourceStatusPollTimeout = Duration.minutes(3);
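
Dropping the hard-coded three-second default makes the YAML value authoritative; provisioning.yml above now sets warmupPollTimeout to 2m. Note that with no default left in code, omitting the key would leave the field null. A sketch of how Dropwizard's io.dropwizard.util.Duration reads such suffixed values:

    // Sketch: Dropwizard Duration parses suffixed strings like the "2m" set in provisioning.yml.
    Duration warmup = Duration.parse("2m");
    warmup.toSeconds(); // 120
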
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandBuilder.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandBuilder.java
index 68e337b..a212d0a 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandBuilder.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandBuilder.java
@@ -69,33 +69,56 @@
 		final CloudProvider cloudProvider = conf.getCloudProvider();
 		final CloudConfiguration cloudConfiguration = conf.getCloudConfiguration();
 		final CloudConfiguration.LdapConfig ldapConfig = cloudConfiguration.getLdapConfig();
+		final CloudConfiguration.StepCerts stepCerts = cloudConfiguration.getStepCerts();
+		final CloudConfiguration.Keycloak keycloak = cloudConfiguration.getKeycloak();
 		if (cloudProvider == CloudProvider.AWS) {
-			return awsCloudSettings(settings, cloudConfiguration, ldapConfig);
+			return awsCloudSettings(settings, cloudConfiguration, ldapConfig, stepCerts, keycloak);
 		} else if (cloudProvider == CloudProvider.GCP) {
-			return gcpCloudSettings(settings, cloudConfiguration, ldapConfig);
+			return gcpCloudSettings(settings, cloudConfiguration, ldapConfig, stepCerts, keycloak);
 		} else if (cloudProvider == CloudProvider.AZURE) {
-			return azureCloudSettings(settings, cloudConfiguration);
+			return azureCloudSettings(settings, cloudConfiguration, ldapConfig, stepCerts, keycloak);
 		} else {
 			throw new UnsupportedOperationException("Unsupported cloud provider " + cloudProvider.getName());
 		}
 	}
 
-	private AzureCloudSettings azureCloudSettings(CloudSettings settings, CloudConfiguration cloudConfiguration) {
+	private AzureCloudSettings azureCloudSettings(CloudSettings settings, CloudConfiguration cloudConfiguration,
+												  CloudConfiguration.LdapConfig ldapConfig,
+												  CloudConfiguration.StepCerts stepCerts,
+												  CloudConfiguration.Keycloak keycloak) {
 		return AzureCloudSettings.builder()
 				.azureRegion(cloudConfiguration.getRegion())
 				.azureResourceGroupName(cloudConfiguration.getAzureResourceGroupName())
 				.azureSecurityGroupName(cloudConfiguration.getSecurityGroupIds())
+				.ldapDn(ldapConfig.getDn())
+				.ldapHost(ldapConfig.getHost())
+				.ldapOu(ldapConfig.getOu())
+				.ldapUser(ldapConfig.getUser())
+				.ldapPassword(ldapConfig.getPassword())
 				.azureSubnetName(cloudConfiguration.getSubnetId())
 				.azureVpcName(cloudConfiguration.getVpcId())
 				.confKeyDir(cloudConfiguration.getConfKeyDir())
 				.azureIamUser(settings.getIamUser())
+				.sbn(cloudConfiguration.getServiceBaseName())
+				.os(cloudConfiguration.getOs())
+				.cloud(conf.getCloudProvider().getName())
 				.imageEnabled(String.valueOf(cloudConfiguration.isImageEnabled()))
-				.sharedImageEnabled(String.valueOf(cloudConfiguration.isSharedImageEnabled()))
+				.stepCertsEnabled(String.valueOf(stepCerts.isEnabled()))
+				.stepCertsRootCA(stepCerts.getRootCA())
+				.stepCertsKid(stepCerts.getKid())
+				.stepCertsKidPassword(stepCerts.getKidPassword())
+				.stepCertsCAURL(stepCerts.getCaURL())
+				.keycloakAuthServerUrl(keycloak.getAuthServerUrl())
+				.keycloakRealmName(keycloak.getRealmName())
+				.keycloakUser(keycloak.getUser())
+				.keycloakUserPassword(keycloak.getUserPassword())
 				.build();
 	}
 
 	private GcpCloudSettings gcpCloudSettings(CloudSettings settings, CloudConfiguration cloudConfiguration,
-											  CloudConfiguration.LdapConfig ldapConfig) {
+											  CloudConfiguration.LdapConfig ldapConfig,
+											  CloudConfiguration.StepCerts stepCerts,
+											  CloudConfiguration.Keycloak keycloak) {
 		return GcpCloudSettings.builder()
 				.projectId(cloudConfiguration.getGcpProjectId())
 				.vpcName(cloudConfiguration.getVpcId())
@@ -113,12 +136,22 @@
 				.confKeyDir(cloudConfiguration.getConfKeyDir())
 				.gcpIamUser(settings.getIamUser())
 				.imageEnabled(String.valueOf(cloudConfiguration.isImageEnabled()))
-				.sharedImageEnabled(String.valueOf(cloudConfiguration.isSharedImageEnabled()))
+				.stepCertsEnabled(String.valueOf(stepCerts.isEnabled()))
+				.stepCertsRootCA(stepCerts.getRootCA())
+				.stepCertsKid(stepCerts.getKid())
+				.stepCertsKidPassword(stepCerts.getKidPassword())
+				.stepCertsCAURL(stepCerts.getCaURL())
+				.keycloakAuthServerUrl(keycloak.getAuthServerUrl())
+				.keycloakRealmName(keycloak.getRealmName())
+				.keycloakUser(keycloak.getUser())
+				.keycloakUserPassword(keycloak.getUserPassword())
 				.build();
 	}
 
 	private AwsCloudSettings awsCloudSettings(CloudSettings settings, CloudConfiguration cloudConfiguration,
-											  CloudConfiguration.LdapConfig ldapConfig) {
+											  CloudConfiguration.LdapConfig ldapConfig,
+											  CloudConfiguration.StepCerts stepCerts,
+											  CloudConfiguration.Keycloak keycloak) {
 		return AwsCloudSettings.builder()
 				.awsRegion(cloudConfiguration.getRegion())
 				.awsSecurityGroupIds(cloudConfiguration.getSecurityGroupIds())
@@ -139,7 +172,15 @@
 				.os(cloudConfiguration.getOs())
 				.confKeyDir(cloudConfiguration.getConfKeyDir())
 				.imageEnabled(String.valueOf(cloudConfiguration.isImageEnabled()))
-				.sharedImageEnabled(String.valueOf(cloudConfiguration.isSharedImageEnabled()))
+				.stepCertsEnabled(String.valueOf(stepCerts.isEnabled()))
+				.stepCertsRootCA(stepCerts.getRootCA())
+				.stepCertsKid(stepCerts.getKid())
+				.stepCertsKidPassword(stepCerts.getKidPassword())
+				.stepCertsCAURL(stepCerts.getCaURL())
+				.keycloakAuthServerUrl(keycloak.getAuthServerUrl())
+				.keycloakRealmName(keycloak.getRealmName())
+				.keycloakUser(keycloak.getUser())
+				.keycloakUserPassword(keycloak.getUserPassword())
 				.build();
 	}
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMock.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMock.java
index d1e2038..1cb6e43 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMock.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMock.java
@@ -78,6 +78,8 @@
 			List<String> list = Lists.newArrayList(
 					"docker.dlab-deeplearning:latest",
 					"docker.dlab-jupyter:latest",
+					"docker.dlab-jupyterlab:latest",
+					"docker.dlab-superset:latest",
 					"docker.dlab-rstudio:latest",
 					"docker.dlab-tensor:latest",
 					"docker.dlab-zeppelin:latest",
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/RunDockerCommand.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/RunDockerCommand.java
index 9cab93a..4c466dc 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/RunDockerCommand.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/commands/RunDockerCommand.java
@@ -33,6 +33,7 @@
     private static final String ROOT_KEYS_PATH = "/root/keys";
     private static final String RESPONSE_PATH = "/response";
     private static final String LOG_PATH = "/logs";
+    private static final String AZURE_AUTH_FILE = "/root/azure_auth.json";
 
     public RunDockerCommand withVolume(String hostSrcPath, String bindPath) {
         options.add(String.format("-v %s:%s", hostSrcPath, bindPath));
@@ -47,6 +48,10 @@
         return withVolume(hostSrcPath, RESPONSE_PATH);
     }
 
+    public RunDockerCommand withVolumeFoAzureAuthFile(String hostSrcPath) {
+        return withVolume(hostSrcPath, AZURE_AUTH_FILE);
+    }
+
     public RunDockerCommand withVolumeForLog(String hostSrcPath, String logDirectory) {
         return withVolume(Paths.get(hostSrcPath, logDirectory).toString(),
                 Paths.get(LOG_PATH, logDirectory).toString());
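
Given withVolume's "-v %s:%s" format and the fixed AZURE_AUTH_FILE constant, the new helper mounts a host-side auth file at the container path the Azure tooling reads. A sketch (the host path is hypothetical):

    // Sketch: mounts the host auth file at the fixed container path.
    new RunDockerCommand()
            .withVolumeFoAzureAuthFile("/opt/dlab/azure_auth.json");
    // resulting option: -v /opt/dlab/azure_auth.json:/root/azure_auth.json
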
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
index 8ccf260..877cc5a 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
@@ -96,8 +96,9 @@
 	@Override
 	protected ComputationalStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
 		return super.getBaseStatusDTO(status)
-				.withExploratoryName(dto.getExploratoryName())
-				.withComputationalName(dto.getComputationalName());
+				.withExploratoryName(dto.getExploratoryName())
+				.withComputationalName(dto.getComputationalName())
+				.withProject(dto.getProject());
 	}
 
 	private String instanceId(JsonNode jsonNode) {
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigure.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigure.java
index adba483..b2aee18 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigure.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigure.java
@@ -22,8 +22,13 @@
 import com.epam.dlab.backendapi.ProvisioningServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.core.Directories;
 import com.epam.dlab.backendapi.core.FileHandlerCallback;
-import com.epam.dlab.backendapi.core.commands.*;
+import com.epam.dlab.backendapi.core.commands.CommandBuilder;
+import com.epam.dlab.backendapi.core.commands.DockerAction;
+import com.epam.dlab.backendapi.core.commands.DockerCommands;
+import com.epam.dlab.backendapi.core.commands.ICommandExecutor;
+import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.folderlistener.FolderListenerExecutor;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.aws.computational.SparkComputationalCreateAws;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
@@ -34,6 +39,8 @@
 import com.google.inject.Singleton;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Objects;
+
 import static com.epam.dlab.backendapi.core.commands.DockerAction.CONFIGURE;
 
 @Slf4j
@@ -81,24 +88,27 @@
 				configuration.getResourceStatusPollTimeout(),
 				getFileHandlerCallback(CONFIGURE, uuid, dto));
 		try {
+			RunDockerCommand runDockerCommand = new RunDockerCommand()
+					.withInteractive()
+					.withName(nameContainer(dto.getEdgeUserName(), CONFIGURE,
+							dto.getExploratoryName(), dto.getComputationalName()))
+					.withVolumeForRootKeys(configuration.getKeyDirectory())
+					.withVolumeForResponse(configuration.getImagesDirectory())
+					.withVolumeForLog(configuration.getDockerLogDirectory(), dataEngineType.getName())
+					.withResource(dataEngineType.getName())
+					.withRequestId(uuid)
+					.withConfKeyName(configuration.getAdminKey())
+					.withActionConfigure(getImageConfigure(dto.getApplicationName(), dataEngineType));
+			if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+					Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+					!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+				runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+			}
+
 			commandExecutor.executeAsync(
 					dto.getEdgeUserName(),
 					uuid,
-					commandBuilder.buildCommand(
-							new RunDockerCommand()
-									.withInteractive()
-									.withName(nameContainer(dto.getEdgeUserName(), CONFIGURE,
-											dto.getExploratoryName(), dto.getComputationalName()))
-									.withVolumeForRootKeys(configuration.getKeyDirectory())
-									.withVolumeForResponse(configuration.getImagesDirectory())
-									.withVolumeForLog(configuration.getDockerLogDirectory(), dataEngineType.getName())
-									.withResource(dataEngineType.getName())
-									.withRequestId(uuid)
-									.withConfKeyName(configuration.getAdminKey())
-									.withActionConfigure(getImageConfigure(dto.getApplicationName(), dataEngineType)),
-							dto
-					)
-			);
+					commandBuilder.buildCommand(runDockerCommand, dto));
 		} catch (Exception t) {
 			throw new DlabException("Could not configure computational resource cluster", t);
 		}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
index c53c86e..8d6e794 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
@@ -56,6 +56,7 @@
 		return baseStatus
 				.withExploratoryName(dto.getExploratoryName())
 				.withComputationalName(dto.getComputationalName())
+				.withProject(dto.getProject())
 				.withUptime(null)
 				.withLastActivity(Date.from(Instant.now()));
 	}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
index 62746e8..047ebf9 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
@@ -50,14 +50,16 @@
 
 	@JsonProperty
 	private final String exploratoryName;
+	private final String project;
 
 	@JsonCreator
 	public ExploratoryCallbackHandler(@JacksonInject RESTService selfService,
 									  @JsonProperty("action") DockerAction action,
 									  @JsonProperty("uuid") String uuid, @JsonProperty("user") String user,
-									  @JsonProperty("exploratoryName") String exploratoryName) {
+									  @JsonProperty("project") String project, @JsonProperty("exploratoryName") String exploratoryName) {
 		super(selfService, user, uuid, action);
 		this.exploratoryName = exploratoryName;
+		this.project = project;
 	}
 
 	@Override
@@ -99,6 +101,8 @@
 
 	@Override
 	protected ExploratoryStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
-		return super.getBaseStatusDTO(status).withExploratoryName(exploratoryName);
+		return super.getBaseStatusDTO(status)
+				.withExploratoryName(exploratoryName)
+				.withProject(project);
 	}
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ImageCreateCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ImageCreateCallbackHandler.java
index 8f185a6..dbbc535 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ImageCreateCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ImageCreateCallbackHandler.java
@@ -40,13 +40,18 @@
 	private final String imageName;
 	@JsonProperty
 	private final String exploratoryName;
+	@JsonProperty
+	private final String project;
+	@JsonProperty
+	private final String endpoint;
 
-	public ImageCreateCallbackHandler(
-			RESTService selfService, String uuid, DockerAction action,
+	public ImageCreateCallbackHandler(RESTService selfService, String uuid, DockerAction action,
 			ExploratoryImageDTO image) {
 		super(selfService, image.getCloudSettings().getIamUser(), uuid, action);
 		this.imageName = image.getImageName();
 		this.exploratoryName = image.getExploratoryName();
+		this.project = image.getProject();
+		this.endpoint = image.getEndpoint();
 	}
 
 	@JsonCreator
@@ -55,10 +60,14 @@
 			@JsonProperty("action") DockerAction action,
 			@JsonProperty("user") String user,
 			@JsonProperty("imageName") String imageName,
-			@JsonProperty("exploratoryName") String exploratoryName) {
+			@JsonProperty("exploratoryName") String exploratoryName,
+			@JsonProperty("project") String projectName,
+			@JsonProperty("endpoint") String endpoint) {
 		super(selfService, user, uuid, action);
 		this.imageName = imageName;
 		this.exploratoryName = exploratoryName;
+		this.project = projectName;
+		this.endpoint = endpoint;
 	}
 
 	@Override
@@ -79,6 +88,8 @@
 		final ImageCreateStatusDTO statusDTO = super.getBaseStatusDTO(status);
 		statusDTO.setExploratoryName(exploratoryName);
 		statusDTO.setName(imageName);
+		statusDTO.setProject(project);
+		statusDTO.setEndpoint(endpoint);
 		statusDTO.withoutImageCreateDto();
 		return statusDTO;
 	}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
index a31fea3..8d46b60 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
@@ -111,7 +111,8 @@
 	protected LibInstallStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
 		return super.getBaseStatusDTO(status)
 				.withExploratoryName(dto.getExploratoryName())
-				.withUptime(Date.from(Instant.now()))
-				.withComputationalName(dto.getComputationalName());
+				.withComputationalName(dto.getComputationalName())
+				.withProject(dto.getProject())
+				.withUptime(Date.from(Instant.now()));
 	}
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/GitExploratoryResource.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/GitExploratoryResource.java
index 80a3d5b..e6f6476 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/GitExploratoryResource.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/GitExploratoryResource.java
@@ -27,6 +27,7 @@
 import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.handlers.ExploratoryGitCredsCallbackHandler;
 import com.epam.dlab.backendapi.service.impl.DockerService;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.exploratory.ExploratoryBaseDTO;
 import com.epam.dlab.dto.exploratory.ExploratoryGitCredsUpdateDTO;
 import com.fasterxml.jackson.core.JsonProcessingException;
@@ -38,6 +39,7 @@
 import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
+import java.util.Objects;
 
 @Path("/exploratory")
 @Consumes(MediaType.APPLICATION_JSON)
@@ -69,6 +71,11 @@
 				.withConfKeyName(configuration.getAdminKey())
 				.withImage(dto.getNotebookImage())
 				.withAction(action);
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
 
 		commandExecutor.executeAsync(username, uuid, commandBuilder.buildCommand(runDockerCommand, dto));
 		return uuid;
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/ImageResource.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/ImageResource.java
index b57f5b2..2d6d9e3 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/ImageResource.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/ImageResource.java
@@ -26,6 +26,7 @@
 import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.handlers.ImageCreateCallbackHandler;
 import com.epam.dlab.backendapi.service.impl.DockerService;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.exploratory.ExploratoryImageDTO;
 import com.epam.dlab.rest.contracts.ExploratoryAPI;
 import com.fasterxml.jackson.core.JsonProcessingException;
@@ -38,6 +39,7 @@
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+import java.util.Objects;
 
 @Path(ExploratoryAPI.EXPLORATORY_IMAGE)
 @Consumes(MediaType.APPLICATION_JSON)
@@ -64,7 +66,7 @@
 	}
 
 	private RunDockerCommand getDockerCommand(DockerAction action, String uuid, ExploratoryImageDTO image) {
-		return new RunDockerCommand()
+		RunDockerCommand runDockerCommand = new RunDockerCommand()
 				.withInteractive()
 				.withVolumeForRootKeys(configuration.getKeyDirectory())
 				.withVolumeForResponse(configuration.getImagesDirectory())
@@ -75,5 +77,12 @@
 				.withResource(getResourceType())
 				.withImage(image.getNotebookImage())
 				.withName(nameContainer(image.getEdgeUserName(), action.toString(), image.getImageName()));
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
+
+		return runDockerCommand;
 	}
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/LibraryResource.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/LibraryResource.java
index 0e07d69..a6ae018 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/LibraryResource.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/LibraryResource.java
@@ -28,6 +28,7 @@
 import com.epam.dlab.backendapi.core.response.handlers.LibInstallCallbackHandler;
 import com.epam.dlab.backendapi.core.response.handlers.LibListCallbackHandler;
 import com.epam.dlab.backendapi.service.impl.DockerService;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.LibListComputationalDTO;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.exploratory.ExploratoryActionDTO;
@@ -44,6 +45,7 @@
 import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
+import java.util.Objects;
 
 @Path("/library")
 @Consumes(MediaType.APPLICATION_JSON)
@@ -139,13 +141,20 @@
 	}
 
 	private RunDockerCommand getDockerCommand(DockerAction action, String uuid) {
-		return new RunDockerCommand()
+		RunDockerCommand runDockerCommand = new RunDockerCommand()
 				.withInteractive()
 				.withVolumeForRootKeys(configuration.getKeyDirectory())
 				.withVolumeForResponse(configuration.getImagesDirectory())
 				.withRequestId(uuid)
 				.withConfKeyName(configuration.getAdminKey())
 				.withAction(action);
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
+
+		return runDockerCommand;
 	}
 
 	private FileHandlerCallback getFileHandlerCallbackExploratory(DockerAction action, String uuid,
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/ProvisioningHealthCheckResource.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/ProvisioningHealthCheckResource.java
new file mode 100644
index 0000000..e50d7ae
--- /dev/null
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/ProvisioningHealthCheckResource.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.resources;
+
+
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.ProvisioningServiceApplicationConfiguration;
+import com.google.inject.Inject;
+import io.dropwizard.auth.Auth;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+@Path("/healthcheck")
+@Produces(MediaType.APPLICATION_JSON)
+public class ProvisioningHealthCheckResource {
+    private static final String HEALTH_CHECK = "ProvisioningHealthCheck";
+
+    @Inject
+    private ProvisioningServiceApplicationConfiguration configuration;
+
+    @GET
+    public Response status(@Auth UserInfo ui) {
+        return Response.ok(configuration.getCloudProvider()).build();
+    }
+}
\ No newline at end of file
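
A quick way to exercise the new endpoint once the service is up: a smoke-test sketch assuming a JAX-RS client on the classpath; the base URI and bearer token are placeholders, not values from this patch. The resource simply echoes the configured CloudProvider, so the body should read e.g. "AWS":

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.Response;

public class ProvisioningHealthCheckSmokeTest {

	public static void main(String[] args) {
		Client client = ClientBuilder.newClient();
		Response response = client
				.target("https://localhost:8084/healthcheck") // placeholder URI
				.request()
				.header("Authorization", "Bearer <access_token>") // @Auth requires a valid token
				.get();
		// Expected: HTTP 200 with the configured cloud provider, e.g. "AWS"
		System.out.println(response.getStatus() + " " + response.readEntity(String.class));
		client.close();
	}
}
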
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/EdgeService.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/EdgeService.java
index a5ed3dc..92cb6e8 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/EdgeService.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/EdgeService.java
@@ -24,6 +24,7 @@
 import com.epam.dlab.backendapi.core.FileHandlerCallback;
 import com.epam.dlab.backendapi.core.commands.*;
 import com.epam.dlab.backendapi.core.response.folderlistener.FolderListenerExecutor;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.ResourceSysBaseDTO;
 import com.epam.dlab.rest.client.RESTService;
 import com.epam.dlab.rest.contracts.KeyAPI;
@@ -33,6 +34,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Objects;
+
 public abstract class EdgeService implements DockerCommands {
 
 	private final Logger logger = LoggerFactory.getLogger(getClass());
@@ -73,6 +76,11 @@
 				.withConfKeyName(configuration.getAdminKey())
 				.withImage(configuration.getEdgeImage())
 				.withAction(action);
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
 
 		commandExecutor.executeAsync(username, uuid, commandBuilder.buildCommand(runDockerCommand, dto));
 		return uuid;
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
index c25f484..b15b342 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
@@ -26,10 +26,13 @@
 import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.handlers.ExploratoryCallbackHandler;
 import com.epam.dlab.backendapi.service.impl.DockerService;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.exploratory.ExploratoryBaseDTO;
 import com.fasterxml.jackson.core.JsonProcessingException;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Objects;
+
 @Slf4j
 public class ExploratoryService extends DockerService implements DockerCommands {
 
@@ -51,6 +54,11 @@
 				.withConfKeyName(configuration.getAdminKey())
 				.withImage(dto.getNotebookImage())
 				.withAction(action);
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
 
 		commandExecutor.executeAsync(username, uuid, commandBuilder.buildCommand(runDockerCommand, dto));
 		return uuid;
@@ -62,7 +70,7 @@
 
 	private FileHandlerCallback getFileHandlerCallback(DockerAction action, String uuid, ExploratoryBaseDTO<?> dto) {
 		return new ExploratoryCallbackHandler(selfService, action, uuid, dto.getCloudSettings().getIamUser(),
-				dto.getExploratoryName());
+				dto.getProject(), dto.getExploratoryName());
 	}
 
 	private String nameContainer(String user, DockerAction action, String name) {
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/InfrastructureService.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/InfrastructureService.java
index 9406afd..699096a 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/InfrastructureService.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/InfrastructureService.java
@@ -23,9 +23,14 @@
 import com.epam.dlab.backendapi.ProvisioningServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.core.Directories;
 import com.epam.dlab.backendapi.core.FileHandlerCallback;
-import com.epam.dlab.backendapi.core.commands.*;
+import com.epam.dlab.backendapi.core.commands.CommandBuilder;
+import com.epam.dlab.backendapi.core.commands.DockerAction;
+import com.epam.dlab.backendapi.core.commands.DockerCommands;
+import com.epam.dlab.backendapi.core.commands.ICommandExecutor;
+import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.folderlistener.FolderListenerExecutor;
 import com.epam.dlab.backendapi.core.response.handlers.ResourcesStatusCallbackHandler;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.UserEnvironmentResources;
 import com.epam.dlab.dto.status.EnvResource;
 import com.epam.dlab.exceptions.DlabException;
@@ -37,6 +42,7 @@
 
 import java.util.Arrays;
 import java.util.List;
+import java.util.Objects;
 import java.util.Optional;
 
 import static com.epam.dlab.backendapi.core.commands.DockerAction.STATUS;
@@ -69,24 +75,23 @@
 
 			if (!(dto.getResourceList().getHostList().isEmpty() && dto.getResourceList().getClusterList().isEmpty())) {
 				log.trace("Request the status of resources for user {} after filtering: {}", username, dto);
-				commandExecutor.executeAsync(
-						username,
-						uuid,
-						commandBuilder.buildCommand(
-								new RunDockerCommand()
-										.withInteractive()
-										.withName(nameContainer(dto.getEdgeUserName(), STATUS, "resources"))
-										.withVolumeForRootKeys(configuration.getKeyDirectory())
-										.withVolumeForResponse(configuration.getImagesDirectory())
-										.withVolumeForLog(configuration.getDockerLogDirectory(), Directories
-												.EDGE_LOG_DIRECTORY)
-										.withResource(getResourceType())
-										.withRequestId(uuid)
-										.withConfKeyName(configuration.getAdminKey())
-										.withActionStatus(configuration.getEdgeImage()),
-								dto
-						)
-				);
+				RunDockerCommand runDockerCommand = new RunDockerCommand()
+						.withInteractive()
+						.withName(nameContainer(dto.getEdgeUserName(), STATUS, "resources"))
+						.withVolumeForRootKeys(configuration.getKeyDirectory())
+						.withVolumeForResponse(configuration.getImagesDirectory())
+						.withVolumeForLog(configuration.getDockerLogDirectory(), Directories.EDGE_LOG_DIRECTORY)
+						.withResource(getResourceType())
+						.withRequestId(uuid)
+						.withConfKeyName(configuration.getAdminKey())
+						.withActionStatus(configuration.getEdgeImage());
+				if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+						Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+						!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+					runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+				}
+
+				commandExecutor.executeAsync(username, uuid, commandBuilder.buildCommand(runDockerCommand, dto));
 			} else {
 				log.debug("Skipping calling status command. Resource lists are empty");
 			}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
index 8770591..1840fbb 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
@@ -25,11 +25,11 @@
 
 public interface ProjectService {
 
-	String create(UserInfo userInfo, ProjectCreateDTO projectCreateDTO);
+    String create(UserInfo userInfo, ProjectCreateDTO projectCreateDTO);
 
-	String terminate(UserInfo userInfo, ProjectActionDTO dto);
+    String terminate(UserInfo userInfo, ProjectActionDTO dto);
 
-	String start(UserInfo userInfo, ProjectActionDTO dto);
+    String start(UserInfo userInfo, ProjectActionDTO dto);
 
-	String stop(UserInfo userInfo, ProjectActionDTO dto);
+    String stop(UserInfo userInfo, ProjectActionDTO dto);
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/CheckInactivityServiceImpl.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/CheckInactivityServiceImpl.java
index 7f7eafe..3713589 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/CheckInactivityServiceImpl.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/CheckInactivityServiceImpl.java
@@ -24,6 +24,7 @@
 import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.handlers.CheckInactivityCallbackHandler;
 import com.epam.dlab.backendapi.service.CheckInactivityService;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.ResourceBaseDTO;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.computational.ComputationalCheckInactivityDTO;
@@ -32,6 +33,8 @@
 import com.google.inject.Singleton;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Objects;
+
 @Slf4j
 @Singleton
 public class CheckInactivityServiceImpl extends DockerService implements CheckInactivityService, DockerCommands {
@@ -41,7 +44,7 @@
 	public String checkComputationalInactivity(String userName, ComputationalCheckInactivityDTO dto) {
 		String uuid = DockerCommands.generateUUID();
 		startComputationalCallbackListener(userName, dto, uuid);
-		final RunDockerCommand dockerCommand = new RunDockerCommand()
+		final RunDockerCommand runDockerCommand = new RunDockerCommand()
 				.withInteractive()
 				.withRemove()
 				.withName(nameContainer(uuid, DockerAction.CHECK_INACTIVITY.toString()))
@@ -55,7 +58,13 @@
 				.withConfKeyName(configuration.getAdminKey())
 				.withImage(dto.getNotebookImage())
 				.withAction(DockerAction.CHECK_INACTIVITY);
-		runDockerCmd(userName, uuid, dockerCommand, dto);
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
+
+		runDockerCmd(userName, uuid, runDockerCommand, dto);
 		return uuid;
 	}
 
@@ -63,7 +72,7 @@
 	public String checkExploratoryInactivity(String userName, ExploratoryCheckInactivityAction dto) {
 		String uuid = DockerCommands.generateUUID();
 		startExploratoryCallbackListener(userName, dto, uuid);
-		final RunDockerCommand dockerCommand = new RunDockerCommand()
+		final RunDockerCommand runDockerCommand = new RunDockerCommand()
 				.withInteractive()
 				.withRemove()
 				.withName(nameContainer(uuid, DockerAction.CHECK_INACTIVITY.toString()))
@@ -75,7 +84,13 @@
 				.withConfKeyName(configuration.getAdminKey())
 				.withImage(dto.getNotebookImage())
 				.withAction(DockerAction.CHECK_INACTIVITY);
-		runDockerCmd(userName, uuid, dockerCommand, dto);
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
+
+		runDockerCmd(userName, uuid, runDockerCommand, dto);
 		return uuid;
 	}
 
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
index a368697..229e21c 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
@@ -21,7 +21,11 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.ProvisioningServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.core.commands.*;
+import com.epam.dlab.backendapi.core.commands.CommandBuilder;
+import com.epam.dlab.backendapi.core.commands.DockerAction;
+import com.epam.dlab.backendapi.core.commands.DockerCommands;
+import com.epam.dlab.backendapi.core.commands.ICommandExecutor;
+import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.folderlistener.FolderListenerExecutor;
 import com.epam.dlab.backendapi.core.response.handlers.ProjectCallbackHandler;
 import com.epam.dlab.backendapi.service.ProjectService;
@@ -37,6 +41,8 @@
 import com.google.inject.Inject;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Objects;
+
 @Slf4j
 public class ProjectServiceImpl implements ProjectService {
 	private static final String PROJECT_IMAGE = "docker.dlab-project";
@@ -96,6 +102,11 @@
 				.withConfKeyName(configuration.getAdminKey())
 				.withImage(image)
 				.withAction(action);
+		if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+				Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+				!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+			runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+		}
 
 		try {
 			commandExecutor.executeAsync(userInfo.getName(), uuid, commandBuilder.buildCommand(runDockerCommand, dto));
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/SparkClusterService.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/SparkClusterService.java
index a773b70..ce73096 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/SparkClusterService.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/service/impl/SparkClusterService.java
@@ -27,6 +27,7 @@
 import com.epam.dlab.backendapi.core.commands.RunDockerCommand;
 import com.epam.dlab.backendapi.core.response.handlers.ComputationalCallbackHandler;
 import com.epam.dlab.backendapi.core.response.handlers.ComputationalConfigure;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
 import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
@@ -38,7 +39,13 @@
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
 
-import static com.epam.dlab.backendapi.core.commands.DockerAction.*;
+import java.util.Objects;
+
+import static com.epam.dlab.backendapi.core.commands.DockerAction.CREATE;
+import static com.epam.dlab.backendapi.core.commands.DockerAction.RECONFIGURE_SPARK;
+import static com.epam.dlab.backendapi.core.commands.DockerAction.START;
+import static com.epam.dlab.backendapi.core.commands.DockerAction.STOP;
+import static com.epam.dlab.backendapi.core.commands.DockerAction.TERMINATE;
 
 @Singleton
 public class SparkClusterService extends DockerService implements DockerCommands {
@@ -76,7 +83,7 @@
 	private void runReconfigureSparkDockerCommand(UserInfo ui, ComputationalClusterConfigDTO clusterConfigDTO,
 												  String uuid) {
 		try {
-			final RunDockerCommand dockerCommand = new RunDockerCommand()
+			final RunDockerCommand runDockerCommand = new RunDockerCommand()
 					.withInteractive()
 					.withName(nameContainer(clusterConfigDTO.getEdgeUserName(), RECONFIGURE_SPARK,
 							clusterConfigDTO.getExploratoryName(),
@@ -89,8 +96,13 @@
 					.withConfKeyName(configuration.getAdminKey())
 					.withImage(DataEngineType.getDockerImageName(SPARK_ENGINE))
 					.withAction(RECONFIGURE_SPARK);
+			if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+					Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+					!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+				runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+			}
 
-			commandExecutor.executeAsync(ui.getName(), uuid, commandBuilder.buildCommand(dockerCommand,
+			commandExecutor.executeAsync(ui.getName(), uuid, commandBuilder.buildCommand(runDockerCommand,
 					clusterConfigDTO));
 		} catch (JsonProcessingException e) {
 			throw new DlabException("Could not" + RECONFIGURE_SPARK.toString() + "computational resources cluster", e);
@@ -103,7 +115,7 @@
 				configuration.getResourceStatusPollTimeout(),
 				getFileHandlerCallback(action, uuid, dto));
 		try {
-			final RunDockerCommand dockerCommand = new RunDockerCommand()
+			final RunDockerCommand runDockerCommand = new RunDockerCommand()
 					.withInteractive()
 					.withName(nameContainer(dto.getEdgeUserName(), action, dto.getExploratoryName(),
 							dto.getComputationalName()))
@@ -115,8 +127,13 @@
 					.withConfKeyName(configuration.getAdminKey())
 					.withImage(DataEngineType.getDockerImageName(SPARK_ENGINE))
 					.withAction(action);
+			if (configuration.getCloudProvider() == CloudProvider.AZURE &&
+					Objects.nonNull(configuration.getCloudConfiguration().getAzureAuthFile()) &&
+					!configuration.getCloudConfiguration().getAzureAuthFile().isEmpty()) {
+				runDockerCommand.withVolumeFoAzureAuthFile(configuration.getCloudConfiguration().getAzureAuthFile());
+			}
 
-			commandExecutor.executeAsync(ui.getName(), uuid, commandBuilder.buildCommand(dockerCommand, dto));
+			commandExecutor.executeAsync(ui.getName(), uuid, commandBuilder.buildCommand(runDockerCommand, dto));
 		} catch (JsonProcessingException e) {
 			throw new DlabException("Could not" + action.toString() + "computational resources cluster", e);
 		}
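
Both catch blocks above build their messages without spaces around the action name, yielding e.g. "Could notSTOPcomputational resources cluster"; those are unchanged context lines, so the patch leaves the bug in place. A corrective sketch with a hypothetical helper name; DlabException and DockerAction are the patch's own types:

import com.epam.dlab.backendapi.core.commands.DockerAction;
import com.epam.dlab.exceptions.DlabException;

final class ClusterErrorMessages {

	private ClusterErrorMessages() {
	}

	static DlabException actionFailed(DockerAction action, Exception cause) {
		// String.format supplies the spaces the original concatenation drops
		return new DlabException(
				String.format("Could not %s computational resources cluster", action), cause);
	}
}
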
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
@@ -3,7 +3,7 @@
    "response": {
       "result": {
          "Action": "Configure notebook server",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json b/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
index fe2bf0a..81afe8a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
@@ -6,7 +6,7 @@
       "tunnel_port": "22",
       "full_edge_conf": {
         "notebook_role_profile_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-nb-Profile",
-        "tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+        "tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
         "edge_security_group_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-edge-SG",
         "allocation_id": "eipalloc-2801084f",
         "key_name": "${CONF_KEY_NAME}",
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
index 1933ea3..525bd9a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Start up notebook server",
          "ip": "172.31.48.131",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "hostname": "ip-172-31-48-131.us-west-2.compute.internal",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
index 4e1b9f0..e0ee8b1 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Stop notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
index 74c82ad7..a9e2a3a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Terminate notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
@@ -3,7 +3,7 @@
    "response": {
       "result": {
          "Action": "Configure notebook server",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json b/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
index cda1c9e..b2a9931 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
@@ -6,7 +6,7 @@
          "tunnel_port": "22",
          "full_edge_conf": {
             "notebook_role_profile_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-nb-Profile",
-            "tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+            "tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
             "edge_security_group_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-edge-SG",
             "allocation_id": "eipalloc-2801084f",
             "key_name": "${CONF_KEY_NAME}",
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
index 1933ea3..525bd9a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Start up notebook server",
          "ip": "172.31.48.131",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "hostname": "ip-172-31-48-131.us-west-2.compute.internal",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
index 4e1b9f0..e0ee8b1 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Stop notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
index 74c82ad7..a9e2a3a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Terminate notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
@@ -3,7 +3,7 @@
    "response": {
       "result": {
          "Action": "Configure notebook server",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java b/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
index bf92609..a065248 100644
--- a/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
+++ b/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
@@ -70,7 +70,7 @@
 
 		RESTServiceMock selfService = new RESTServiceMock();
 		ExploratoryCallbackHandler handler = new ExploratoryCallbackHandler(selfService, action,
-				getRequestId(exec), getEdgeUserName(exec), getExploratoryName(exec));
+				getRequestId(exec), getEdgeUserName(exec), "", getExploratoryName(exec));
 		handler.handle(exec.getResponseFileName(), Files.readAllBytes(Paths.get(exec.getResponseFileName())));
 
 		try {
diff --git a/services/self-service/Dockerfile_aws b/services/self-service/Dockerfile
similarity index 89%
rename from services/self-service/Dockerfile_aws
rename to services/self-service/Dockerfile
index 951fdd7..bb2a7b7 100644
--- a/services/self-service/Dockerfile_aws
+++ b/services/self-service/Dockerfile
@@ -28,13 +28,14 @@
     python \
     python-dev \
     py-pip \
+    openssl \
     build-base \
     && pip install awscli --upgrade \
     && apk --purge -v del py-pip \
     && rm -rf /var/cache/apk/*
 
-COPY self-service-2.1.jar /root/
-COPY entrypoint_aws.sh /
-RUN chmod 755 /entrypoint_aws.sh
+COPY self-service-2.2.jar /root/
+COPY entrypoint.sh /
+RUN chmod 755 /entrypoint.sh
 
-ENTRYPOINT ["/entrypoint_aws.sh"]
\ No newline at end of file
+ENTRYPOINT ["/entrypoint.sh"]
\ No newline at end of file
diff --git a/services/self-service/entrypoint.sh b/services/self-service/entrypoint.sh
new file mode 100644
index 0000000..f2d7149
--- /dev/null
+++ b/services/self-service/entrypoint.sh
@@ -0,0 +1,55 @@
+#!/bin/sh
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+checkfile () {
+if [ -s /root/step-certs/ca.crt ]
+then
+  RUN="true"
+else
+  RUN="false"
+  sleep 5
+fi
+}
+
+/bin/mkdir -p /root/keys
+
+if [ -d "/root/step-certs" ]; then
+  while checkfile
+  do
+    if [ "$RUN" = "false" ];
+    then
+        echo "Waiting..."
+    else
+        echo "CA exists!"
+        break
+    fi
+  done
+  /usr/bin/keytool -importcert -trustcacerts -alias step-ca -file /root/step-certs/ca.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts
+  /usr/bin/keytool -importcert -trustcacerts -alias step-crt -file /root/step-certs/tls.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts
+fi
+
+
+
+/usr/bin/openssl pkcs12 -export -in /root/step-certs/tls.crt -inkey /root/step-certs/tls.key -name ssn -out ssn.p12 -password pass:${SSN_KEYSTORE_PASSWORD}
+/usr/bin/keytool -importkeystore -srckeystore ssn.p12 -srcstoretype PKCS12 -alias ssn -destkeystore /root/keys/ssn.keystore.jks -deststorepass "${SSN_KEYSTORE_PASSWORD}" -srcstorepass "${SSN_KEYSTORE_PASSWORD}"
+/usr/bin/keytool -keystore /root/keys/ssn.keystore.jks -alias step-ca -import -file /root/step-certs/ca.crt  -deststorepass "${SSN_KEYSTORE_PASSWORD}" -srcstorepass "${SSN_KEYSTORE_PASSWORD}" -noprompt
+/usr/bin/java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 -DDLAB_CONF_DIR=/root/ /root/self-service-2.2.jar server /root/self-service.yml
\ No newline at end of file
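
The two keytool calls above import the step-ca root and leaf certificates into the JRE trust store after the wait loop sees a non-empty ca.crt. For reference, a sketch of the equivalent import done with the java.security API; the paths and the default "changeit" store password come from the script, everything else is illustrative:

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.security.KeyStore;
import java.security.cert.CertificateFactory;

public class TrustStoreImport {

	public static void main(String[] args) throws Exception {
		char[] storePass = "changeit".toCharArray(); // default JRE cacerts password
		String cacerts = "/usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts";

		KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
		try (FileInputStream in = new FileInputStream(cacerts)) {
			ks.load(in, storePass);
		}
		CertificateFactory cf = CertificateFactory.getInstance("X.509");
		try (FileInputStream ca = new FileInputStream("/root/step-certs/ca.crt")) {
			ks.setCertificateEntry("step-ca", cf.generateCertificate(ca));
		}
		try (FileOutputStream out = new FileOutputStream(cacerts)) {
			ks.store(out, storePass); // persist the updated trust store
		}
	}
}
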
diff --git a/services/self-service/entrypoint_aws.sh b/services/self-service/entrypoint_aws.sh
deleted file mode 100644
index b71c097..0000000
--- a/services/self-service/entrypoint_aws.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-/bin/mkdir -p /root/keys
-
-/usr/bin/aws s3 cp s3://${SSN_BUCKET_NAME}/dlab/certs/ssn/ssn.keystore.jks /root/keys/ssn.keystore.jks
-/usr/bin/aws s3 cp s3://${SSN_BUCKET_NAME}/dlab/certs/ssn/ssn.crt /root/keys/ssn.crt
-/usr/bin/aws s3 cp s3://${SSN_BUCKET_NAME}/dlab/certs/endpoint/endpoint.crt /root/keys/endpoint.crt
-
-/usr/bin/keytool -importcert -trustcacerts -alias dlab -file /root/keys/ssn.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts
-/usr/bin/keytool -importcert -trustcacerts -file /root/keys/endpoint.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts
-
-/usr/bin/java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 -DDLAB_CONF_DIR=/root/ /root/self-service-2.1.jar server /root/self-service.yml
\ No newline at end of file
diff --git a/services/self-service/entrypoint_gcp.sh b/services/self-service/entrypoint_gcp.sh
deleted file mode 100644
index cb750ff..0000000
--- a/services/self-service/entrypoint_gcp.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-# *****************************************************************************
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# ******************************************************************************
-
-#mkdir -p /root/keys
-#/usr/bin/keytool -genkeypair -alias dlab -keyalg RSA -validity 730 -storepass password \
-#  -keypass password -keystore /root/keys/ssn.keystore.jks \
-#  -keysize 2048 -dname "CN=35.237.224.151" -ext SAN=dns:localhost,ip:35.237.224.151
-#/usr/bin/keytool -exportcert -alias dlab -storepass password -file /root/keys/ssn.crt \
-#  -keystore /root/keys/ssn.keystore.jks
-
-/usr/bin/keytool -importcert -trustcacerts -alias dlab -file /root/keys/ssn.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts
-/usr/bin/keytool -importcert -trustcacerts -alias endpoint1 -file /root/keys/endpoint1.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts
-/usr/bin/keytool -importcert -trustcacerts -alias endpoint2 -file /root/keys/endpoint2.crt -noprompt -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts
-
-/usr/bin/java -Xmx1024M -jar -Duser.timezone=UTC -Dfile.encoding=UTF-8 -DDLAB_CONF_DIR=/root/ /root/self-service-2.1.jar server /root/self-service.yml
\ No newline at end of file
diff --git a/services/self-service/pom.xml b/services/self-service/pom.xml
index 25dbf42..382dc7c 100644
--- a/services/self-service/pom.xml
+++ b/services/self-service/pom.xml
@@ -184,6 +184,12 @@
             <artifactId>guacamole-common</artifactId>
             <version>1.0.0</version>
         </dependency>
+
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-collections4</artifactId>
+            <version>4.4</version>
+        </dependency>
     </dependencies>
 
     <build>
diff --git a/services/self-service/self-service.yml b/services/self-service/self-service.yml
index ab6dfd9..df92c25 100644
--- a/services/self-service/self-service.yml
+++ b/services/self-service/self-service.yml
@@ -21,16 +21,13 @@
 
 <#include "ssn.yml">
 
-<#if CLOUD_TYPE == "aws">
 # Minimum and maximum number of slave EMR instances than could be created
 minEmrInstanceCount: 2
 maxEmrInstanceCount: 14
 # Minimum and maximum percentage cost for slave EMR spot instances biding
 minEmrSpotInstanceBidPct: 20
 maxEmrSpotInstanceBidPct: 90
-</#if>
 
-<#if CLOUD_TYPE == "gcp">
 # Maximum length for gcp user name (due to gcp restrictions)
 maxUserNameLength: 10
 # Minimum and maximum number of slave Dataproc instances that could be created
@@ -38,7 +35,6 @@
 maxInstanceCount: 15
 minDataprocPreemptibleCount: 0
 gcpOuauth2AuthenticationEnabled: false
-</#if>
 
 # Boundaries for Spark cluster creation
 minSparkInstanceCount: 2
@@ -61,11 +57,7 @@
 billingConfFile: ${DLAB_CONF_DIR}/billing.yml
 </#if>
 
-<#if CLOUD_TYPE == "azure">
-azureUseLdap: <LOGIN_USE_LDAP>
 ssnInstanceSize: <SSN_INSTANCE_SIZE>
-maxSessionDurabilityMilliseconds: 288000000
-</#if>
 
 serviceBaseName: SERVICE_BASE_NAME
 os: OPERATION_SYSTEM
@@ -83,8 +75,8 @@
 #      port: 8080
   - type: https
     port: 8443
-    certAlias: dlab
-    validateCerts: true
+    certAlias: ssn
+    validateCerts: false
     keyStorePath: ${KEY_STORE_PATH}
     keyStorePassword: ${KEY_STORE_PASSWORD}
     trustStorePath: ${TRUST_STORE_PATH}
@@ -94,8 +86,8 @@
 #      port: 8081
   - type: https
     port: 8444
-    certAlias: dlab
-    validateCerts: true
+    certAlias: ssn
+    validateCerts: false
     keyStorePath: ${KEY_STORE_PATH}
     keyStorePassword: ${KEY_STORE_PASSWORD}
     trustStorePath: ${TRUST_STORE_PATH}
@@ -148,13 +140,19 @@
     cron: "*/20 * * ? * * *"
   checkQuoteScheduler:
     enabled: true
-    cron: "0 0 * ? * * *"
+    cron: "0 2/15 * ? * *"
   checkUserQuoteScheduler:
     enabled: false
     cron: "0 0 * ? * * *"
   checkProjectQuoteScheduler:
     enabled: true
-    cron: "0 * * ? * * *"
+    cron: "0 4/15 * ? * *"
+  checkEndpointStatusScheduler:
+    enabled: true
+    cron: "0 6/15 * ? * *"
+  billingScheduler:
+    enabled: true
+    cron: "0 0/15 * ? * *"
 
 
 guacamole:
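
The new scheduler entries stagger the 15-minute jobs at different minute offsets (0, 2, 4, 6) so they do not all fire at once. A small sketch to sanity-check one of the Quartz-style expressions, assuming the Quartz library is available; the class name is illustrative:

import org.quartz.CronExpression;

import java.util.Date;

public class CronCheck {

	public static void main(String[] args) throws Exception {
		CronExpression cron = new CronExpression("0 2/15 * ? * *");
		Date next = new Date();
		for (int i = 0; i < 3; i++) {
			next = cron.getNextValidTimeAfter(next);
			System.out.println(next); // fires at minutes 2, 17, 32 and 47 of every hour
		}
	}
}
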
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/SelfServiceApplication.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/SelfServiceApplication.java
index da6a5c5..6a12ea5 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/SelfServiceApplication.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/SelfServiceApplication.java
@@ -21,7 +21,6 @@
 
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.dao.IndexCreator;
-import com.epam.dlab.backendapi.domain.EnvStatusListener;
 import com.epam.dlab.backendapi.domain.ExploratoryLibCache;
 import com.epam.dlab.backendapi.dropwizard.bundles.DlabKeycloakBundle;
 import com.epam.dlab.backendapi.dropwizard.listeners.MongoStartupListener;
@@ -31,6 +30,7 @@
 import com.epam.dlab.backendapi.resources.*;
 import com.epam.dlab.backendapi.resources.callback.*;
 import com.epam.dlab.backendapi.schedulers.internal.ManagedScheduler;
+import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.servlet.guacamole.GuacamoleServlet;
 import com.epam.dlab.cloud.CloudModule;
 import com.epam.dlab.constants.ServiceConsts;
@@ -106,11 +106,10 @@
 		environment.lifecycle().addServerLifecycleListener(injector.getInstance(MongoStartupListener.class));
 		final RestoreHandlerStartupListener restoreHandlerStartupListener =
 				new RestoreHandlerStartupListener(injector.getInstance(Key.get(RESTService.class,
-						Names.named(ServiceConsts.PROVISIONING_SERVICE_NAME))));
+						Names.named(ServiceConsts.PROVISIONING_SERVICE_NAME))), injector.getInstance(EndpointService.class));
 		environment.lifecycle().addServerLifecycleListener(restoreHandlerStartupListener);
 		environment.lifecycle().addServerLifecycleListener(this::disableGzipHandlerForGuacamoleServlet);
 		environment.lifecycle().manage(injector.getInstance(IndexCreator.class));
-		environment.lifecycle().manage(injector.getInstance(EnvStatusListener.class));
 		environment.lifecycle().manage(injector.getInstance(ExploratoryLibCache.class));
 		environment.lifecycle().manage(injector.getInstance(ManagedScheduler.class));
 		environment.healthChecks().register(ServiceConsts.MONGO_NAME, injector.getInstance(MongoHealthCheck.class));
@@ -128,9 +127,6 @@
 		jersey.register(new DlabValidationExceptionMapper());
 		jersey.register(new ValidationExceptionMapper());
 		jersey.register(new ResourceQuoteReachedExceptionMapper());
-		jersey.register(injector.getInstance(SecurityResource.class));
-		jersey.register(injector.getInstance(KeyUploaderResource.class));
-		jersey.register(injector.getInstance(EdgeResource.class));
 
 		jersey.register(injector.getInstance(InfrastructureTemplateResource.class));
 		jersey.register(injector.getInstance(InfrastructureInfoResource.class));
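
RestoreHandlerStartupListener now receives an EndpointService alongside the provisioning RESTService. The listener itself is outside this excerpt, so the following is only the constructor shape implied by the call above, for orientation:

import com.epam.dlab.backendapi.service.EndpointService;
import com.epam.dlab.rest.client.RESTService;
import io.dropwizard.lifecycle.ServerLifecycleListener;
import org.eclipse.jetty.server.Server;

public class RestoreHandlerStartupListener implements ServerLifecycleListener {

	private final RESTService provisioningService;
	private final EndpointService endpointService;

	public RestoreHandlerStartupListener(RESTService provisioningService, EndpointService endpointService) {
		this.provisioningService = provisioningService;
		this.endpointService = endpointService;
	}

	@Override
	public void serverStarted(Server server) {
		// assumed behavior: restore response handlers for each known endpoint
	}
}
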
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestDescription.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/ProjectAdmin.java
similarity index 87%
rename from integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestDescription.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/ProjectAdmin.java
index 5c156b4..2fca3cd 100644
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestDescription.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/ProjectAdmin.java
@@ -17,15 +17,14 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.test.libs;
+package com.epam.dlab.backendapi.annotation;
 
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 
+@Target(ElementType.METHOD)
 @Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface TestDescription {
-    String value() default "";
+public @interface ProjectAdmin {
 }
diff --git a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestDescription.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/User.java
similarity index 87%
copy from integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestDescription.java
copy to services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/User.java
index 5c156b4..b56dd20 100644
--- a/integration-tests/src/test/java/com/epam/dlab/automation/test/libs/TestDescription.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/User.java
@@ -17,15 +17,14 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.test.libs;
+package com.epam.dlab.backendapi.annotation;
 
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 
+@Target(ElementType.PARAMETER)
 @Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface TestDescription {
-    String value() default "";
+public @interface User {
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/conf/SelfServiceApplicationConfiguration.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/conf/SelfServiceApplicationConfiguration.java
index 48410d1..aaface6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/conf/SelfServiceApplicationConfiguration.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/conf/SelfServiceApplicationConfiguration.java
@@ -21,17 +21,12 @@
 
 import com.epam.dlab.ServiceConfiguration;
 import com.epam.dlab.backendapi.domain.SchedulerConfigurationData;
-import com.epam.dlab.backendapi.validation.SelfServiceCloudConfigurationSequenceProvider;
 import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.rest.client.RESTServiceFactory;
-import com.epam.dlab.validation.AwsValidation;
-import com.epam.dlab.validation.AzureValidation;
-import com.epam.dlab.validation.GcpValidation;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import io.dropwizard.client.JerseyClientConfiguration;
 import io.dropwizard.util.Duration;
 import org.hibernate.validator.constraints.NotEmpty;
-import org.hibernate.validator.group.GroupSequenceProvider;
 
 import javax.validation.Valid;
 import javax.validation.constraints.Max;
@@ -42,37 +37,33 @@
 /**
  * Configuration for Self Service.
  */
-@GroupSequenceProvider(SelfServiceCloudConfigurationSequenceProvider.class)
 public class SelfServiceApplicationConfiguration extends ServiceConfiguration {
 
-	@Min(value = 2, groups = AwsValidation.class)
+	@Min(value = 2)
 	@JsonProperty
 	private int minEmrInstanceCount;
 
-	@Max(value = 1000, groups = AwsValidation.class)
+	@Max(value = 1000)
 	@JsonProperty
 	private int maxEmrInstanceCount;
 
-	@Min(value = 10, groups = AwsValidation.class)
+	@Min(value = 10)
 	@JsonProperty
 	private int minEmrSpotInstanceBidPct;
 
-	@Max(value = 95, groups = AwsValidation.class)
+	@Max(value = 95)
 	@JsonProperty
 	private int maxEmrSpotInstanceBidPct;
 
-	@Min(value = 2, groups = {AzureValidation.class, AwsValidation.class, GcpValidation.class})
+	@Min(value = 2)
 	@JsonProperty
 	private int minSparkInstanceCount;
 
-	@Max(value = 1000, groups = {AzureValidation.class, AwsValidation.class, GcpValidation.class})
+	@Max(value = 1000)
 	@JsonProperty
 	private int maxSparkInstanceCount;
 
 	@JsonProperty
-	private boolean azureUseLdap;
-
-	@JsonProperty
 	private String ssnInstanceSize;
 
 	@JsonProperty
@@ -87,7 +78,7 @@
 	@JsonProperty
 	private boolean billingSchedulerEnabled = false;
 
-	@NotEmpty(groups = AwsValidation.class)
+	@NotEmpty
 	@JsonProperty
 	private String billingConfFile;
 	@JsonProperty
@@ -101,8 +92,6 @@
 	@JsonProperty
 	private boolean gcpOuauth2AuthenticationEnabled;
 	@JsonProperty
-	private long maxSessionDurabilityMilliseconds;
-	@JsonProperty
 	private boolean mongoMigrationEnabled;
 	@JsonProperty
 	private int privateKeySize = 2048;
@@ -146,10 +135,6 @@
 		return jerseyClient;
 	}
 
-	public long getMaxSessionDurabilityMilliseconds() {
-		return maxSessionDurabilityMilliseconds;
-	}
-
 	public Map<String, SchedulerConfigurationData> getSchedulers() {
 		return schedulers;
 	}
@@ -245,10 +230,6 @@
 		return privateKeySize;
 	}
 
-	public boolean isAzureUseLdap() {
-		return azureUseLdap;
-	}
-
 	public String getSsnInstanceSize() {
 		return ssnInstanceSize;
 	}
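
With the cloud-specific validation groups and the @GroupSequenceProvider gone, every remaining @Min/@Max/@NotEmpty constraint is evaluated unconditionally by the default Bean Validation pass. A minimal harness to observe the effect, illustrative only:

import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;

import javax.validation.ConstraintViolation;
import javax.validation.Validation;
import javax.validation.Validator;
import java.util.Set;

public class ConfigValidationCheck {

	public static void main(String[] args) {
		SelfServiceApplicationConfiguration conf = new SelfServiceApplicationConfiguration();
		Validator validator = Validation.buildDefaultValidatorFactory().getValidator();
		// No group argument: all constraints run in a single pass for any cloud
		Set<ConstraintViolation<SelfServiceApplicationConfiguration>> violations = validator.validate(conf);
		violations.forEach(v -> System.out.println(v.getPropertyPath() + ": " + v.getMessage()));
	}
}
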
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java
index 980c452..28a6c64 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java
@@ -19,78 +19,62 @@
 
 package com.epam.dlab.backendapi.dao;
 
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.domain.BaseShape;
-import com.epam.dlab.backendapi.domain.DataEngineServiceShape;
-import com.epam.dlab.backendapi.domain.DataEngineShape;
-import com.epam.dlab.backendapi.domain.EndpointShape;
-import com.epam.dlab.backendapi.domain.ExploratoryShape;
-import com.epam.dlab.backendapi.domain.SsnShape;
+import com.epam.dlab.backendapi.domain.BillingReportLine;
 import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.backendapi.roles.RoleType;
-import com.epam.dlab.backendapi.roles.UserRoles;
-import com.epam.dlab.billing.BillingCalculationUtils;
-import com.epam.dlab.billing.DlabResourceType;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.model.aws.ReportLine;
-import com.google.common.collect.Lists;
+import com.epam.dlab.dto.billing.BillingResourceType;
 import com.google.inject.Inject;
-import com.mongodb.client.AggregateIterable;
-import com.mongodb.client.FindIterable;
 import com.mongodb.client.model.Aggregates;
 import com.mongodb.client.model.Filters;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.collections4.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.bson.Document;
 import org.bson.conversions.Bson;
 
 import java.math.BigDecimal;
-import java.util.*;
+import java.time.ZoneId;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
-import static com.epam.dlab.backendapi.dao.ComputationalDAO.COMPUTATIONAL_ID;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_ID;
 import static com.epam.dlab.backendapi.dao.MongoCollections.BILLING;
-import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_RESOURCE_TYPE;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_USAGE_DATE;
+import static com.mongodb.client.model.Accumulators.max;
+import static com.mongodb.client.model.Accumulators.min;
 import static com.mongodb.client.model.Accumulators.sum;
 import static com.mongodb.client.model.Aggregates.group;
 import static com.mongodb.client.model.Aggregates.match;
-import static com.mongodb.client.model.Filters.*;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.gte;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.lte;
+import static com.mongodb.client.model.Filters.regex;
 import static java.util.Collections.singletonList;
 
 @Slf4j
-public abstract class BaseBillingDAO<T extends BillingFilter> extends BaseDAO implements BillingDAO<T> {
-
-	public static final String SHAPE = "shape";
-	public static final String SERVICE_BASE_NAME = "service_base_name";
-	public static final String ITEMS = "lines";
-	public static final String COST_TOTAL = "cost_total";
-	public static final String FULL_REPORT = "full_report";
-
-	private static final String PROJECT = "project";
-	private static final String MASTER_NODE_SHAPE = "master_node_shape";
-	private static final String SLAVE_NODE_SHAPE = "slave_node_shape";
-	private static final String TOTAL_INSTANCE_NUMBER = "total_instance_number";
-
-	private static final String DATAENGINE_SHAPE = "dataengine_instance_shape";
-	private static final String DATAENGINE_INSTANCE_COUNT = "dataengine_instance_count";
-
-	private static final String DATAENGINE_DOCKER_IMAGE = "image";
+public class BaseBillingDAO extends BaseDAO implements BillingDAO {
 	private static final int ONE_HUNDRED = 100;
-	private static final String TOTAL_FIELD_NAME = "total";
 	private static final String COST_FIELD = "$cost";
-	public static final String SHARED_RESOURCE_NAME = "Shared resource";
-	protected static final String FIELD_PROJECT = "project";
-	private static final String EDGE_FORMAT = "%s-%s-%s-edge";
-	private static final String PROJECT_COLLECTION = "Projects";
-	private static final String TAGS = "tags";
+	private static final String TOTAL_FIELD_NAME = "total";
+	private static final String PROJECT = "project";
+	private static final String APPLICATION = "application";
+	private static final String USAGE_DATE = "usageDate";
+	private static final String USER = "user";
+	private static final String RESOURCE_TYPE = "resource_type";
+	private static final String DLAB_ID = "dlabId";
+	private static final String FROM = "from";
+	private static final String TO = "to";
+	private static final String PRODUCT = "product";
+	private static final String CURRENCY = "currency";
+	private static final String COST = "cost";
+	private static final String RESOURCE_NAME = "resource_name";
+	private static final String ENDPOINT = "endpoint";
+	private static final String SHAPE = "shape";
+	private static final String EXPLORATORY = "exploratoryName";
 
 	@Inject
 	protected SettingsDAO settings;
@@ -100,161 +84,6 @@
 	private ProjectDAO projectDAO;
 
 	@Override
-	public Document getReport(UserInfo userInfo, T filter) {
-		boolean isFullReport = UserRoles.checkAccess(userInfo, RoleType.PAGE, "/api/infrastructure_provision/billing",
-				userInfo.getRoles());
-		setUserFilter(userInfo, filter, isFullReport);
-		List<Bson> matchCriteria = matchCriteria(filter);
-		List<Bson> pipeline = new ArrayList<>();
-		if (!matchCriteria.isEmpty()) {
-			pipeline.add(Aggregates.match(Filters.and(matchCriteria)));
-		}
-		pipeline.add(groupCriteria());
-		pipeline.add(sortCriteria());
-		final Map<String, BaseShape> shapes = getShapes(filter.getShapes());
-		return prepareReport(filter.getStatuses(), !filter.getShapes().isEmpty(),
-				getCollection(BILLING).aggregate(pipeline), shapes, isFullReport);
-	}
-
-	private Document prepareReport(List<UserInstanceStatus> statuses, boolean filterByShape,
-								   AggregateIterable<Document> agg,
-								   Map<String, BaseShape> shapes, boolean fullReport) {
-
-		List<Document> reportItems = new ArrayList<>();
-
-		String usageDateStart = null;
-		String usageDateEnd = null;
-		double costTotal = 0D;
-
-		for (Document d : agg) {
-			Document id = (Document) d.get(MongoKeyWords.MONGO_ID);
-			String resourceId = id.getString(dlabIdFieldName());
-			BaseShape shape = shapes.get(resourceId);
-			final UserInstanceStatus status = Optional.ofNullable(shape).map(BaseShape::getStatus).orElse(null);
-			if ((filterByShape && shape == null) ||
-					(!statuses.isEmpty() && statuses.stream().noneMatch(s -> s.equals(status)))) {
-				continue;
-			}
-
-
-			String dateStart = d.getString(MongoKeyWords.USAGE_FROM);
-			if (StringUtils.compare(usageDateStart, dateStart, false) > 0) {
-				usageDateStart = dateStart;
-			}
-			String dateEnd = d.getString(MongoKeyWords.USAGE_TO);
-			if (StringUtils.compare(usageDateEnd, dateEnd) < 0) {
-				usageDateEnd = dateEnd;
-			}
-
-
-			costTotal += d.getDouble(MongoKeyWords.COST);
-
-			final String dlabResourceType = id.getString("dlab_resource_type");
-			final String statusString = Optional
-					.ofNullable(status)
-					.map(UserInstanceStatus::toString)
-					.orElse(StringUtils.EMPTY);
-
-			Document item = new Document()
-					.append(MongoKeyWords.DLAB_USER, getUserOrDefault(id.getString(USER)))
-					.append(dlabIdFieldName(), resourceId)
-					.append(shapeFieldName(), Optional.ofNullable(shape).map(BaseShape::format)
-							.orElse(StringUtils.EMPTY))
-					.append("dlab_resource_type", DlabResourceType
-							.getResourceTypeName(dlabResourceType)) //todo check on azure!!!
-					.append(STATUS, statusString)
-					.append(FIELD_RESOURCE_TYPE, resourceType(id))
-					.append(productFieldName(), id.getString(productFieldName()))
-					.append(PROJECT, id.getString(PROJECT))
-					.append(MongoKeyWords.COST, d.getDouble(MongoKeyWords.COST))
-					.append(costFieldName(), BillingCalculationUtils.formatDouble(d.getDouble(MongoKeyWords
-							.COST)))
-					.append(currencyCodeFieldName(), id.getString(currencyCodeFieldName()))
-					.append(usageDateFromFieldName(), dateStart)
-					.append(usageDateToFieldName(), dateEnd)
-					.append(TAGS, Optional.ofNullable(shape).map(BaseShape::getTags));
-
-			reportItems.add(item);
-		}
-
-		return new Document()
-				.append(SERVICE_BASE_NAME, settings.getServiceBaseName())
-				.append(usageDateFromFieldName(), usageDateStart)
-				.append(usageDateToFieldName(), usageDateEnd)
-				.append(ITEMS, reportItems)
-				.append(COST_TOTAL, BillingCalculationUtils.formatDouble(BillingCalculationUtils.round
-						(costTotal, 2)))
-				.append(currencyCodeFieldName(), (reportItems.isEmpty() ? null :
-						reportItems.get(0).getString(currencyCodeFieldName())))
-				.append(FULL_REPORT, fullReport);
-
-	}
-
-	protected String resourceType(Document id) {
-		return id.getString(FIELD_RESOURCE_TYPE);
-	}
-
-	protected String currencyCodeFieldName() {
-		return "currency_code";
-	}
-
-	protected String usageDateToFieldName() {
-		return MongoKeyWords.USAGE_TO;
-	}
-
-	protected String costFieldName() {
-		return MongoKeyWords.COST;
-	}
-
-	protected String productFieldName() {
-		return ReportLine.FIELD_PRODUCT;
-	}
-
-	protected String usageDateFromFieldName() {
-		return MongoKeyWords.USAGE_FROM;
-	}
-
-	protected String dlabIdFieldName() {
-		return ReportLine.FIELD_DLAB_ID;
-	}
-
-	protected String shapeFieldName() {
-		return SHAPE;
-	}
-
-	protected abstract Bson sortCriteria();
-
-	protected abstract Bson groupCriteria();
-
-	private Map<String, BaseShape> getShapes(List<String> shapeNames) {
-		FindIterable<Document> userInstances = getUserInstances();
-		final Map<String, BaseShape> shapes = new HashMap<>();
-
-		for (Document d : userInstances) {
-			getExploratoryShape(shapeNames, d)
-					.ifPresent(shape -> shapes.put(d.getString(EXPLORATORY_ID), shape));
-			@SuppressWarnings("unchecked")
-			List<Document> comp = (List<Document>) d.get(COMPUTATIONAL_RESOURCES);
-			comp.forEach(c -> (isDataEngine(c.getString(DATAENGINE_DOCKER_IMAGE)) ? getDataEngineShape(shapeNames, c) :
-					getDataEngineServiceShape(shapeNames, c))
-					.ifPresent(shape -> shapes.put(c.getString(COMPUTATIONAL_ID), shape)));
-		}
-
-		StreamSupport.stream(getCollection(PROJECT_COLLECTION).find().spliterator(), false)
-				.forEach(d -> ((List<Document>) d.get("endpoints"))
-						.forEach(endpoint -> getEndpointShape(shapeNames, endpoint)
-								.ifPresent(shape -> shapes.put(String.format(EDGE_FORMAT, getServiceBaseName(),
-										d.getString("name").toLowerCase(),
-										endpoint.getString("name")), shape))));
-
-		getSsnShape(shapeNames)
-				.ifPresent(shape -> shapes.put(getServiceBaseName() + "-ssn", shape));
-
-		log.trace("Loaded shapes is {}", shapes);
-		return shapes;
-	}
-
-	@Override
 	public Double getTotalCost() {
 		return aggregateBillingData(singletonList(group(null, sum(TOTAL_FIELD_NAME, COST_FIELD))));
 	}
@@ -296,7 +125,6 @@
 				.isPresent();
 	}
 
-
 	@Override
 	public boolean isProjectQuoteReached(String project) {
 		final Double projectCost = getProjectCost(project);
@@ -306,12 +134,42 @@
 	}
 
 	@Override
+	public List<BillingReportLine> findBillingData(String project, String endpoint, List<String> resourceNames) {
+		return find(BILLING, and(eq(PROJECT, project), eq(ENDPOINT, endpoint), in(RESOURCE_NAME, resourceNames)), BillingReportLine.class);
+	}
+
+	@Override
 	public int getBillingProjectQuoteUsed(String project) {
 		return toPercentage(() -> projectDAO.getAllowedBudget(project), getProjectCost(project));
 	}
 
-	protected String getUserOrDefault(String user) {
-		return StringUtils.isNotBlank(user) ? user : SHARED_RESOURCE_NAME;
+	public List<BillingReportLine> aggregateBillingData(BillingFilter filter) {
+		List<Bson> pipeline = new ArrayList<>();
+		List<Bson> matchCriteria = matchCriteria(filter);
+		if (!matchCriteria.isEmpty()) {
+			pipeline.add(Aggregates.match(Filters.and(matchCriteria)));
+		}
+		pipeline.add(groupCriteria());
+		return StreamSupport.stream(getCollection(BILLING).aggregate(pipeline).spliterator(), false)
+				.map(this::toBillingReport)
+				.collect(Collectors.toList());
+	}
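
The new report path is compact: matchCriteria() narrows the billing documents, groupCriteria() folds them, and every grouped document becomes one BillingReportLine. A minimal usage sketch, assuming BillingFilter exposes setters matching the getters used above (only the getters appear in this patch):

    BillingFilter filter = new BillingFilter();
    filter.setUsers(Collections.singletonList("test_user"));   // assumed setter
    filter.setDateStart("2020-01-01");                         // assumed setter
    filter.setDateEnd("2020-01-31");                           // assumed setter
    List<BillingReportLine> report = billingDao.aggregateBillingData(filter);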
+
+	@Override
+	public void deleteByUsageDate(String application, String usageDate) {
+		deleteMany(BILLING, and(eq(APPLICATION, application), eq(USAGE_DATE, usageDate)));
+	}
+
+	@Override
+	public void deleteByUsageDateRegex(String application, String usageDate) {
+		deleteMany(BILLING, and(eq(APPLICATION, application), regex(USAGE_DATE, "^" + usageDate)));
+	}
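
Anchoring the pattern with "^" turns the second delete into a prefix match, which is the whole difference between the two methods. A short sketch, assuming usage dates are stored as "yyyy-MM-dd" strings and billingDao is the injected DAO:

    billingDao.deleteByUsageDate("aws", "2020-03-15");   // exact match: one day's records
    billingDao.deleteByUsageDateRegex("aws", "2020-03"); // "^2020-03": every record of the month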
+
+	@Override
+	public void save(List<BillingReportLine> billingData) {
+		if (CollectionUtils.isNotEmpty(billingData)) {
+			insertMany(BILLING, new ArrayList<>(billingData));
+		}
 	}
 
 	private Integer toPercentage(Supplier<Optional<Integer>> allowedBudget, Double totalCost) {
@@ -321,150 +179,65 @@
 				.orElse(BigDecimal.ZERO.intValue());
 	}
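
The quota arithmetic behind these checks is plain cost over budget scaled by ONE_HUNDRED, truncated to an int, with zero returned when no budget is configured. Illustrative values only:

    int quotaUsed = (int) (50.0 / 200 * 100);    // 25: a quarter of a 200 budget is spent
    int noBudget = BigDecimal.ZERO.intValue();   // 0 when the budget is absent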
 
-	private List<Bson> matchCriteria(BillingFilter filter) {
-
-		List<Bson> searchCriteria = new ArrayList<>();
-
-		if (filter.getUser() != null && !filter.getUser().isEmpty()) {
-			searchCriteria.add(Filters.in(MongoKeyWords.DLAB_USER, filter.getUser()));
-		}
-
-		if (filter.getResourceType() != null && !filter.getResourceType().isEmpty()) {
-			searchCriteria.add(Filters.in("dlab_resource_type",
-					DlabResourceType.getResourceTypeIds(filter.getResourceType())));
-		}
-
-		if (filter.getDlabId() != null && !filter.getDlabId().isEmpty()) {
-			searchCriteria.add(regex(dlabIdFieldName(), filter.getDlabId(), "i"));
-		}
-
-		if (filter.getDateStart() != null && !filter.getDateStart().isEmpty()) {
-			searchCriteria.add(gte(FIELD_USAGE_DATE, filter.getDateStart()));
-		}
-		if (filter.getDateEnd() != null && !filter.getDateEnd().isEmpty()) {
-			searchCriteria.add(lte(FIELD_USAGE_DATE, filter.getDateEnd()));
-		}
-		if (filter.getProjects() != null && !filter.getProjects().isEmpty()) {
-			searchCriteria.add(in(PROJECT, filter.getProjects()));
-		}
-
-		searchCriteria.addAll(cloudMatchCriteria((T) filter));
-		return searchCriteria;
-	}
-
-	protected abstract List<Bson> cloudMatchCriteria(T filter);
-
 	private Double aggregateBillingData(List<Bson> pipeline) {
 		return Optional.ofNullable(aggregate(BILLING, pipeline).first())
 				.map(d -> d.getDouble(TOTAL_FIELD_NAME))
 				.orElse(BigDecimal.ZERO.doubleValue());
 	}
 
-	private FindIterable<Document> getUserInstances() {
-		return getCollection(USER_INSTANCES)
-				.find()
-				.projection(
-						fields(excludeId(),
-								include(SHAPE, EXPLORATORY_ID, STATUS, TAGS,
-										COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_ID,
-										COMPUTATIONAL_RESOURCES + "." + MASTER_NODE_SHAPE,
-										COMPUTATIONAL_RESOURCES + "." + SLAVE_NODE_SHAPE,
-										COMPUTATIONAL_RESOURCES + "." + TOTAL_INSTANCE_NUMBER,
-										COMPUTATIONAL_RESOURCES + "." + DATAENGINE_SHAPE,
-										COMPUTATIONAL_RESOURCES + "." + DATAENGINE_INSTANCE_COUNT,
-										COMPUTATIONAL_RESOURCES + "." + DATAENGINE_DOCKER_IMAGE,
-										COMPUTATIONAL_RESOURCES + "." + STATUS,
-										COMPUTATIONAL_RESOURCES + "." + TAGS
-								)));
+	private Bson groupCriteria() {
+		return group(getGroupingFields(USER, DLAB_ID, RESOURCE_TYPE, RESOURCE_NAME, PROJECT, PRODUCT, CURRENCY, SHAPE, EXPLORATORY),
+				sum(COST, "$" + COST),
+				min(FROM, "$" + FROM),
+				max(TO, "$" + TO));
 	}
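
Assuming getGroupingFields() (presumably a BaseDAO helper, it is not part of this hunk) simply maps each field name to its "$"-prefixed path, the stage above could be spelled out long-hand like this sketch, using the driver builders already imported here:

    Bson groupStage = group(
            new Document(USER, "$" + USER)
                    .append(DLAB_ID, "$" + DLAB_ID)
                    .append(PROJECT, "$" + PROJECT)
                    .append(PRODUCT, "$" + PRODUCT),    // ...plus the remaining key fields
            sum(COST, "$" + COST),
            min(FROM, "$" + FROM),
            max(TO, "$" + TO));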
 
-	private Optional<ExploratoryShape> getExploratoryShape(List<String> shapeNames, Document d) {
-		final String shape = d.getString(SHAPE);
-		if (isShapeAcceptable(shapeNames, shape)) {
-			return Optional.of(ExploratoryShape.builder()
-					.shape(shape)
-					.status(UserInstanceStatus.of(d.getString(STATUS)))
-					.tags((Map<String, String>) d.get(TAGS))
-					.build());
+	private List<Bson> matchCriteria(BillingFilter filter) {
+		List<Bson> searchCriteria = new ArrayList<>();
+
+		if (CollectionUtils.isNotEmpty(filter.getUsers())) {
+			searchCriteria.add(in(USER, filter.getUsers()));
 		}
-		return Optional.empty();
-	}
-
-	private Optional<DataEngineServiceShape> getDataEngineServiceShape(List<String> shapeNames, Document c) {
-		final String desMasterShape = c.getString(MASTER_NODE_SHAPE);
-		final String desSlaveShape = c.getString(SLAVE_NODE_SHAPE);
-		if (isShapeAcceptable(shapeNames, desMasterShape, desSlaveShape)) {
-			return Optional.of(DataEngineServiceShape.builder()
-					.shape(desMasterShape)
-					.status(UserInstanceStatus.of(c.getString(STATUS)))
-					.slaveCount(c.getString(TOTAL_INSTANCE_NUMBER))
-					.slaveShape(desSlaveShape)
-					.tags((Map<String, String>) c.get(TAGS))
-					.build());
+		if (CollectionUtils.isNotEmpty(filter.getResourceTypes())) {
+			searchCriteria.add(in(RESOURCE_TYPE, filter.getResourceTypes()));
 		}
-		return Optional.empty();
-	}
-
-	private Optional<DataEngineShape> getDataEngineShape(List<String> shapeNames, Document c) {
-		final String shape = c.getString(DATAENGINE_SHAPE);
-		if ((isShapeAcceptable(shapeNames, shape)) && StringUtils.isNotEmpty(c.getString(COMPUTATIONAL_ID))) {
-
-			return Optional.of(DataEngineShape.builder()
-					.shape(shape)
-					.status(UserInstanceStatus.of(c.getString(STATUS)))
-					.slaveCount(c.getString(DATAENGINE_INSTANCE_COUNT))
-					.tags((Map<String, String>) c.get(TAGS))
-					.build());
+		if (StringUtils.isNotEmpty(filter.getDlabId())) {
+			searchCriteria.add(regex(DLAB_ID, filter.getDlabId(), "i"));
 		}
-		return Optional.empty();
-	}
-
-	private Optional<SsnShape> getSsnShape(List<String> shapeNames) {
-		final String shape = getSsnShape();
-		if (isShapeAcceptable(shapeNames, shape)) {
-			return Optional.of(SsnShape.builder()
-					.shape(shape)
-					.status(UserInstanceStatus.RUNNING)
-					.build());
+		if (StringUtils.isNotEmpty(filter.getDateStart())) {
+			searchCriteria.add(gte(USAGE_DATE, filter.getDateStart()));
 		}
-		return Optional.empty();
-	}
-
-	private Optional<EndpointShape> getEndpointShape(List<String> shapeNames, Document endpoint) {
-		if (isShapeAcceptable(shapeNames, getSsnShape())) {
-			return Optional.of(EndpointShape.builder()
-					.shape(StringUtils.EMPTY)
-					.status(UserInstanceStatus.of(endpoint.getString("status")))
-					.build());
+		if (StringUtils.isNotEmpty(filter.getDateEnd())) {
+			searchCriteria.add(lte(USAGE_DATE, filter.getDateEnd()));
 		}
-		return Optional.empty();
-	}
-
-	private boolean isDataEngine(String dockerImage) {
-		return DataEngineType.fromDockerImageName(dockerImage) == DataEngineType.SPARK_STANDALONE;
-	}
-
-	private boolean isShapeAcceptable(List<String> shapeNames, String... shapes) {
-		return shapeNames == null || shapeNames.isEmpty() || Arrays.stream(shapes).anyMatch(shapeNames::contains);
-	}
-
-	protected String getServiceBaseName() {
-		return settings.getServiceBaseName();
-	}
-
-	protected abstract String getSsnShape();
-
-	protected void usersToLowerCase(List<String> users) {
-		if (users != null) {
-			users.replaceAll(u -> u != null ? u.toLowerCase() : null);
+		if (CollectionUtils.isNotEmpty(filter.getProjects())) {
+			searchCriteria.add(in(PROJECT, filter.getProjects()));
 		}
+		if (CollectionUtils.isNotEmpty(filter.getProducts())) {
+			searchCriteria.add(in(PRODUCT, filter.getProducts()));
+		}
+		if (CollectionUtils.isNotEmpty(filter.getShapes())) {
+			searchCriteria.add(regex(SHAPE, "(" + String.join("|", filter.getShapes()) + ")"));
+		}
+
+		return searchCriteria;
 	}
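
One caveat on the shape filter: joining the shape names with "|" builds a regex alternation, so a name with regex metacharacters (the dot in "t2.micro", say) is read as pattern syntax rather than a literal. If that ever matters, quoting each name is a small change; this is a sketch of the safer variant, not what the patch does:

    String pattern = filter.getShapes().stream()
            .map(java.util.regex.Pattern::quote)          // match each shape name literally
            .collect(java.util.stream.Collectors.joining("|", "(", ")"));
    searchCriteria.add(regex(SHAPE, pattern));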
 
-	protected void setUserFilter(UserInfo userInfo, BillingFilter filter, boolean isFullReport) {
-		if (isFullReport) {
-			usersToLowerCase(filter.getUser());
-		} else {
-			filter.setUser(Lists.newArrayList(userInfo.getName().toLowerCase()));
-		}
+	private BillingReportLine toBillingReport(Document d) {
+		Document id = (Document) d.get("_id");
+		return BillingReportLine.builder()
+				.dlabId(id.getString(DLAB_ID))
+				.project(id.getString(PROJECT))
+				.resourceName(id.getString(RESOURCE_NAME))
+				.exploratoryName(id.getString(EXPLORATORY))
+				.shape(id.getString(SHAPE))
+				.user(id.getString(USER))
+				.product(id.getString(PRODUCT))
+				.resourceType(Optional.ofNullable(id.getString(RESOURCE_TYPE)).map(BillingResourceType::valueOf).orElse(null))
+				.usageDateFrom(d.getDate(FROM).toInstant().atZone(ZoneId.systemDefault()).toLocalDate())
+				.usageDateTo(d.getDate(TO).toInstant().atZone(ZoneId.systemDefault()).toLocalDate())
+				.cost(BigDecimal.valueOf(d.getDouble(COST)).setScale(2, BigDecimal.ROUND_HALF_UP).doubleValue())
+				.currency(id.getString(CURRENCY))
+				.build();
 	}
 }
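
Two conversions in toBillingReport() are worth spelling out. The $min/$max accumulators come back as java.util.Date and are bridged to LocalDate through the server zone, and the cost is rounded half-up to two decimals (ROUND_HALF_UP is the legacy int constant; RoundingMode.HALF_UP is its enum equivalent). A small sketch with illustrative values:

    LocalDate day = new Date(0L).toInstant()
            .atZone(ZoneId.systemDefault()).toLocalDate();      // epoch date in the server's zone
    double cost = BigDecimal.valueOf(12.345)
            .setScale(2, RoundingMode.HALF_UP).doubleValue();   // 12.35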
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseDAO.java
index 034011a..c2ff69b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseDAO.java
@@ -31,7 +31,11 @@
 import com.google.inject.Inject;
 import com.mongodb.BasicDBObject;
 import com.mongodb.MongoException;
-import com.mongodb.client.*;
+import com.mongodb.client.AggregateIterable;
+import com.mongodb.client.FindIterable;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoCursor;
+import com.mongodb.client.MongoIterable;
 import com.mongodb.client.model.UpdateOptions;
 import com.mongodb.client.result.DeleteResult;
 import com.mongodb.client.result.UpdateResult;
@@ -41,13 +45,21 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
 
-import static com.mongodb.client.model.Filters.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.exists;
+import static com.mongodb.client.model.Filters.ne;
 
 /**
  * Implements the base API for Mongo database.
@@ -158,6 +170,29 @@
 	}
 
 	/**
+	 * Serializes objects and inserts them into the collection.
+	 *
+	 * @param collection collection name.
+	 * @param objects    objects for inserting into the collection.
+	 */
+	protected void insertMany(String collection, List<Object> objects) {
+		try {
+			mongoService.getCollection(collection)
+					.insertMany(convertToBson(objects)
+							.stream()
+							.peek(o -> {
+								o.append(ID, generateUUID());
+								o.append(TIMESTAMP, new Date());
+							})
+							.collect(Collectors.toList())
+					);
+		} catch (MongoException e) {
+			LOGGER.warn("Insert to Mongo DB fails: {}", e.getLocalizedMessage(), e);
+			throw new DlabException("Insert to Mongo DB fails: " + e.getLocalizedMessage(), e);
+		}
+	}
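
Worth noting for callers of the new bulk insert: every document is stamped with a fresh _id UUID and a timestamp on the way in, so saving the same list twice creates duplicates rather than upserting. Hypothetical usage (the collection name and DTOs are illustrative):

    List<Object> docs = Arrays.asList(lineOne, lineTwo);   // any serializable DTOs
    insertMany("billing", docs);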
+
+	/**
 	 * Updates single document in the collection by condition.
 	 *
 	 * @param collection collection name.
@@ -230,6 +265,22 @@
 	}
 
 	/**
+	 * Removes many documents in the collection by condition.
+	 *
+	 * @param collection collection name.
+	 * @param condition  condition for searching documents in the collection.
+	 * @return result of the delete operation.
+	 */
+	protected DeleteResult deleteMany(String collection, Bson condition) {
+		try {
+			return mongoService.getCollection(collection)
+					.deleteMany(condition);
+		} catch (MongoException e) {
+			LOGGER.warn("Removing document from Mongo DB fails: {}", e.getLocalizedMessage(), e);
+			throw new DlabException("Removing document from Mongo DB fails: " + e.getLocalizedMessage(), e);
+		}
+	}
+
+	/**
 	 * Finds and returns all documents from the collection.
 	 *
 	 * @param collection collection name.
@@ -362,6 +413,13 @@
 		}
 	}
 
+	List<Document> convertToBson(List<Object> objects) {
+		return objects
+				.stream()
+				.map(this::convertToBson)
+				.collect(Collectors.toList());
+	}
+
 	/**
 	 * Finds and returns one object as given class from the collection by condition.
 	 *
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java
index cd03bff..67630cd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java
@@ -18,11 +18,12 @@
  */
 package com.epam.dlab.backendapi.dao;
 
-import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.domain.BillingReportLine;
 import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import org.bson.Document;
 
-public interface BillingDAO<T extends BillingFilter> {
+import java.util.List;
+
+public interface BillingDAO {
 	Double getTotalCost();
 
 	Double getUserCost(String user);
@@ -32,6 +33,7 @@
 	int getBillingQuoteUsed();
 
 	int getBillingUserQuoteUsed(String user);
+
 	int getBillingProjectQuoteUsed(String project);
 
 	boolean isBillingQuoteReached();
@@ -40,5 +42,13 @@
 
 	boolean isProjectQuoteReached(String project);
 
-	Document getReport(UserInfo userInfo, T filter);
+	List<BillingReportLine> findBillingData(String project, String endpoint, List<String> resourceNames);
+
+	List<BillingReportLine> aggregateBillingData(BillingFilter filter);
+
+	void deleteByUsageDate(String application, String usageDate);
+
+	void deleteByUsageDateRegex(String application, String usageDate);
+
+	void save(List<BillingReportLine> billingData);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
index 311158a..683f8fc 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
@@ -21,7 +21,11 @@
 
 
 import com.epam.dlab.backendapi.util.DateRemoverUtil;
-import com.epam.dlab.dto.*;
+import com.epam.dlab.dto.ResourceURL;
+import com.epam.dlab.dto.SchedulerJobDTO;
+import com.epam.dlab.dto.StatusEnvBaseDTO;
+import com.epam.dlab.dto.UserInstanceDTO;
+import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.computational.ComputationalStatusDTO;
@@ -36,15 +40,30 @@
 
 import java.time.LocalDateTime;
 import java.time.ZoneId;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
 
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.UPTIME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
 import static com.epam.dlab.backendapi.dao.SchedulerJobDAO.SCHEDULER_DATA;
 import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
-import static com.mongodb.client.model.Filters.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.ne;
+import static com.mongodb.client.model.Filters.not;
 import static com.mongodb.client.model.Projections.elemMatch;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static com.mongodb.client.model.Updates.push;
 import static com.mongodb.client.model.Updates.set;
 import static java.util.stream.Collectors.toList;
@@ -70,8 +89,8 @@
 		return COMPUTATIONAL_RESOURCES + FIELD_SET_DELIMETER + fieldName;
 	}
 
-	private static Bson computationalCondition(String user, String exploratoryName, String compName) {
-		return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName),
+	private static Bson computationalCondition(String user, String project, String exploratoryName, String compName) {
+		return and(eq(USER, user), eq(PROJECT, project), eq(EXPLORATORY_NAME, exploratoryName),
 				eq(COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_NAME, compName));
 	}
 
@@ -80,12 +99,14 @@
 	 *
 	 * @param user             user name.
 	 * @param exploratoryName  name of exploratory.
+	 * @param project          name of project.
 	 * @param computationalDTO object of computational resource.
 	 * @return <b>true</b> if operation was successful, otherwise <b>false</b>.
 	 */
-	public boolean addComputational(String user, String exploratoryName, UserComputationalResource computationalDTO) {
+	public boolean addComputational(String user, String exploratoryName, String project,
+									UserComputationalResource computationalDTO) {
 		final UpdateResult updateResult = updateOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						not(elemMatch(COMPUTATIONAL_RESOURCES,
 								eq(COMPUTATIONAL_NAME, computationalDTO.getComputationalName())))),
 				push(COMPUTATIONAL_RESOURCES, convertToBson(computationalDTO)));
@@ -96,14 +117,15 @@
 	 * Finds and returns the computational resource.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @throws DlabException if exception occurs
 	 */
-	public UserComputationalResource fetchComputationalFields(String user, String exploratoryName,
+	public UserComputationalResource fetchComputationalFields(String user, String project, String exploratoryName,
 															  String computationalName) {
 		Optional<UserInstanceDTO> opt = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						Filters.elemMatch(COMPUTATIONAL_RESOURCES, eq(COMPUTATIONAL_NAME, computationalName))),
 				fields(include(COMPUTATIONAL_RESOURCES + ".$"), excludeId()),
 				UserInstanceDTO.class);
@@ -114,10 +136,10 @@
 						"exploratory name " + exploratoryName + " not found."));
 	}
 
-	public List<UserComputationalResource> findComputationalResourcesWithStatus(String user, String exploratoryName,
+	public List<UserComputationalResource> findComputationalResourcesWithStatus(String user, String project, String exploratoryName,
 																				UserInstanceStatus status) {
 		final UserInstanceDTO userInstanceDTO = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						elemMatch(COMPUTATIONAL_RESOURCES, eq(STATUS, status.toString()))),
 				fields(include(COMPUTATIONAL_RESOURCES), excludeId()),
 				UserInstanceDTO.class)
@@ -139,7 +161,7 @@
 		try {
 			Document values = new Document(computationalFieldFilter(STATUS), dto.getStatus());
 			return updateOne(USER_INSTANCES,
-					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 							elemMatch(COMPUTATIONAL_RESOURCES,
 									and(eq(COMPUTATIONAL_NAME, dto.getComputationalName()),
 											not(eq(STATUS, TERMINATED.toString()))))),
@@ -162,7 +184,7 @@
 		UpdateResult result;
 		do {
 			result = updateOne(USER_INSTANCES,
-					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 							elemMatch(COMPUTATIONAL_RESOURCES,
 									and(not(eq(STATUS, TERMINATED.toString())),
 											not(eq(STATUS, dto.getStatus()))))),
@@ -174,80 +196,51 @@
 		return count;
 	}
 
-	public void updateComputationalStatusesForExploratory(String user, String exploratoryName,
+	public void updateComputationalStatusesForExploratory(String user, String project, String exploratoryName,
 														  UserInstanceStatus dataengineStatus,
 														  UserInstanceStatus dataengineServiceStatus,
 														  UserInstanceStatus... excludedStatuses) {
-		updateComputationalResource(user, exploratoryName, dataengineStatus, DataEngineType.SPARK_STANDALONE,
-				excludedStatuses);
-		updateComputationalResource(user, exploratoryName, dataengineServiceStatus, DataEngineType.CLOUD_SERVICE,
-				excludedStatuses);
-
-	}
-
-	/**
-	 * Updates status for all corresponding computational resources in Mongo database.
-	 *
-	 * @param newStatus                new status for computational resources.
-	 * @param user                     user name.
-	 * @param exploratoryStatuses      exploratory's status list.
-	 * @param computationalTypes       type list of computational resource (may contain 'dataengine' and/or
-	 *                                 'dataengine-service').
-	 * @param oldComputationalStatuses old statuses of computational resources.
-	 */
-
-	public void updateStatusForComputationalResources(UserInstanceStatus newStatus, String user,
-													  List<UserInstanceStatus> exploratoryStatuses,
-													  List<DataEngineType> computationalTypes,
-													  UserInstanceStatus... oldComputationalStatuses) {
-
-		List<String> exploratoryNames = stream(find(USER_INSTANCES,
-				and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses))),
-				fields(include(EXPLORATORY_NAME)))).map(d -> d.getString(EXPLORATORY_NAME))
-				.collect(toList());
-
-		exploratoryNames.forEach(explName ->
-				getComputationalResourcesWhereStatusIn(user, computationalTypes, explName, oldComputationalStatuses)
-						.forEach(compName -> updateComputationalField(user, explName, compName,
-								STATUS, newStatus.toString()))
-		);
+		updateComputationalResource(user, project, exploratoryName, dataengineStatus,
+				DataEngineType.SPARK_STANDALONE, excludedStatuses);
+		updateComputationalResource(user, project, exploratoryName, dataengineServiceStatus,
+				DataEngineType.CLOUD_SERVICE, excludedStatuses);
 	}
 
 	/**
 	 * Updates the status for single computational resource in Mongo database.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   exploratory's name.
 	 * @param computationalName name of computational resource.
 	 * @param newStatus         new status of computational resource.
 	 */
 
-	public void updateStatusForComputationalResource(String user, String exploratoryName,
-													 String computationalName,
-													 UserInstanceStatus newStatus) {
-		updateComputationalField(user, exploratoryName, computationalName, STATUS, newStatus.toString());
+	public void updateStatusForComputationalResource(String user, String project, String exploratoryName,
+													 String computationalName, UserInstanceStatus newStatus) {
+		updateComputationalField(user, project, exploratoryName, computationalName, STATUS, newStatus.toString());
 	}
 
 
-	private void updateComputationalResource(String user, String exploratoryName,
+	private void updateComputationalResource(String user, String project, String exploratoryName,
 											 UserInstanceStatus dataengineServiceStatus, DataEngineType cloudService,
 											 UserInstanceStatus... excludedStatuses) {
 		UpdateResult result;
 		do {
 			result = updateMany(USER_INSTANCES,
-					computationalFilter(user, exploratoryName, dataengineServiceStatus.toString(),
-							DataEngineType.getDockerImageName(cloudService), excludedStatuses),
+					computationalFilter(user, project, exploratoryName,
+							dataengineServiceStatus.toString(), DataEngineType.getDockerImageName(cloudService), excludedStatuses),
 					new Document(SET,
 							new Document(computationalFieldFilter(STATUS), dataengineServiceStatus.toString())));
 		} while (result.getModifiedCount() > 0);
 	}
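
The do/while shape above recurs across this patch, seemingly because the positional "$" path built by computationalFieldFilter() updates only the first matching array element per document on each pass; the loop runs until nothing is left to change. The generic pattern, sketched with hypothetical collection, filter and update values:

    UpdateResult result;
    do {
        // each pass updates one more matching embedded resource per document
        result = collection.updateMany(filter, update);
    } while (result.getModifiedCount() > 0);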
 
-	private Bson computationalFilter(String user, String exploratoryName, String computationalStatus, String
-			computationalImage, UserInstanceStatus[] excludedStatuses) {
+	private Bson computationalFilter(String user, String project, String exploratoryName, String computationalStatus,
+									 String computationalImage, UserInstanceStatus[] excludedStatuses) {
 		final String[] statuses = Arrays.stream(excludedStatuses)
 				.map(UserInstanceStatus::toString)
 				.toArray(String[]::new);
-		return and(exploratoryCondition(user, exploratoryName),
+		return and(exploratoryCondition(user, exploratoryName, project),
 				elemMatch(COMPUTATIONAL_RESOURCES, and(eq(IMAGE, computationalImage),
 						not(in(STATUS, statuses)),
 						not(eq(STATUS, computationalStatus)))));
@@ -286,7 +279,7 @@
 				values.append(computationalFieldFilter(CONFIG),
 						dto.getConfig().stream().map(this::convertToBson).collect(toList()));
 			}
-			return updateOne(USER_INSTANCES, and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+			return updateOne(USER_INSTANCES, and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 					elemMatch(COMPUTATIONAL_RESOURCES,
 							and(eq(COMPUTATIONAL_NAME, dto.getComputationalName()),
 									not(eq(STATUS, TERMINATED.toString()))))),
@@ -309,49 +302,19 @@
 		return map;
 	}
 
-
-	/**
-	 * Updates the requirement for reuploading key for all corresponding computational resources in Mongo database.
-	 *
-	 * @param user                  user name.
-	 * @param exploratoryStatuses   exploratory's status list.
-	 * @param computationalTypes    type list of computational resource (may contain 'dataengine' and/or
-	 *                              'dataengine-service').
-	 * @param reuploadKeyRequired   true/false.
-	 * @param computationalStatuses statuses of computational resource.
-	 */
-
-	public void updateReuploadKeyFlagForComputationalResources(String user,
-															   List<UserInstanceStatus> exploratoryStatuses,
-															   List<DataEngineType> computationalTypes,
-															   boolean reuploadKeyRequired,
-															   UserInstanceStatus... computationalStatuses) {
-
-		List<String> exploratoryNames = stream(find(USER_INSTANCES,
-				and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses))),
-				fields(include(EXPLORATORY_NAME)))).map(d -> d.getString(EXPLORATORY_NAME))
-				.collect(toList());
-
-		exploratoryNames.forEach(explName ->
-				getComputationalResourcesWhereStatusIn(user, computationalTypes, explName, computationalStatuses)
-						.forEach(compName -> updateComputationalField(user, explName, compName,
-								REUPLOAD_KEY_REQUIRED, reuploadKeyRequired))
-		);
-	}
-
 	/**
 	 * Updates the requirement for reuploading key for single computational resource in Mongo database.
 	 *
 	 * @param user                user name.
+	 * @param project             project name.
 	 * @param exploratoryName     exploratory's name.
 	 * @param computationalName   name of computational resource.
 	 * @param reuploadKeyRequired true/false.
 	 */
 
-	public void updateReuploadKeyFlagForComputationalResource(String user, String exploratoryName,
-															  String computationalName, boolean
-																	  reuploadKeyRequired) {
-		updateComputationalField(user, exploratoryName, computationalName, REUPLOAD_KEY_REQUIRED, reuploadKeyRequired);
+	public void updateReuploadKeyFlagForComputationalResource(String user, String project, String exploratoryName,
+															  String computationalName, boolean reuploadKeyRequired) {
+		updateComputationalField(user, project, exploratoryName, computationalName, REUPLOAD_KEY_REQUIRED, reuploadKeyRequired);
 	}
 
 	/**
@@ -359,6 +322,7 @@
 	 * have predefined type.
 	 *
 	 * @param user                  user name.
+	 * @param project               project name.
 	 * @param computationalTypes    type list of computational resource which may contain 'dataengine' and/or
 	 *                              'dataengine-service'.
 	 * @param exploratoryName       name of exploratory.
@@ -367,10 +331,11 @@
 	 */
 
 	@SuppressWarnings("unchecked")
-	public List<String> getComputationalResourcesWhereStatusIn(String user, List<DataEngineType> computationalTypes,
+	public List<String> getComputationalResourcesWhereStatusIn(String user, String project,
+															   List<DataEngineType> computationalTypes,
 															   String exploratoryName,
 															   UserInstanceStatus... computationalStatuses) {
-		return stream((List<Document>) find(USER_INSTANCES, and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName)),
+		return stream((List<Document>) find(USER_INSTANCES, exploratoryCondition(user, exploratoryName, project),
 				fields(include(COMPUTATIONAL_RESOURCES))).first().get(COMPUTATIONAL_RESOURCES))
 				.filter(doc ->
 						statusList(computationalStatuses).contains(doc.getString(STATUS)) &&
@@ -379,9 +344,9 @@
 	}
 
 	@SuppressWarnings("unchecked")
-	public List<ClusterConfig> getClusterConfig(String user, String exploratoryName, String computationalName) {
+	public List<ClusterConfig> getClusterConfig(String user, String project, String exploratoryName, String computationalName) {
 		return findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						Filters.elemMatch(COMPUTATIONAL_RESOURCES, and(eq(COMPUTATIONAL_NAME, computationalName),
 								notNull(CONFIG)))),
 				fields(include(COMPUTATIONAL_RESOURCES + ".$"), excludeId())
@@ -396,41 +361,42 @@
 	 * Updates computational resource's field.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @param fieldName         computational field's name for updating.
 	 * @param fieldValue        computational field's value for updating.
 	 */
 
-	private <T> UpdateResult updateComputationalField(String user, String exploratoryName, String computationalName,
+	private <T> UpdateResult updateComputationalField(String user, String project, String exploratoryName, String computationalName,
 													  String fieldName, T fieldValue) {
 		return updateOne(USER_INSTANCES,
-				computationalCondition(user, exploratoryName, computationalName),
+				computationalCondition(user, project, exploratoryName, computationalName),
 				set(computationalFieldFilter(fieldName), fieldValue));
 	}
 
-	public void updateSchedulerSyncFlag(String user, String exploratoryName, boolean syncFlag) {
+	public void updateSchedulerSyncFlag(String user, String project, String exploratoryName, boolean syncFlag) {
 		final String syncStartField = SCHEDULER_DATA + ".sync_start_required";
 		UpdateResult result;
 		do {
 
-			result = updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName),
+			result = updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project),
 					elemMatch(COMPUTATIONAL_RESOURCES, and(ne(SCHEDULER_DATA, null), ne(syncStartField, syncFlag)))),
 					set(computationalFieldFilter(syncStartField), syncFlag));
 
 		} while (result.getModifiedCount() != 0);
 	}
 
-	public UpdateResult updateSchedulerDataForComputationalResource(String user, String exploratoryName,
+	public UpdateResult updateSchedulerDataForComputationalResource(String user, String project, String exploratoryName,
 																	String computationalName, SchedulerJobDTO dto) {
-		return updateComputationalField(user, exploratoryName, computationalName, SCHEDULER_DATA,
-				Objects.isNull(dto) ? null : convertToBson(dto));
+		return updateComputationalField(user, project, exploratoryName, computationalName,
+				SCHEDULER_DATA, Objects.isNull(dto) ? null : convertToBson(dto));
 	}
 
-	public void updateLastActivity(String user, String exploratoryName,
+	public void updateLastActivity(String user, String project, String exploratoryName,
 								   String computationalName, LocalDateTime lastActivity) {
 		updateOne(USER_INSTANCES,
-				computationalCondition(user, exploratoryName, computationalName),
+				computationalCondition(user, project, exploratoryName, computationalName),
 				set(computationalFieldFilter(COMPUTATIONAL_LAST_ACTIVITY),
 						Date.from(lastActivity.atZone(ZoneId.systemDefault()).toInstant())));
 	}
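
Since the Mongo driver persists java.util.Date rather than java.time types, the LocalDateTime passed in is bridged through the server zone; reading it back mirrors the conversion. A minimal round-trip sketch:

    LocalDateTime lastActivity = LocalDateTime.now();
    Date stored = Date.from(lastActivity.atZone(ZoneId.systemDefault()).toInstant());
    LocalDateTime restored = stored.toInstant()
            .atZone(ZoneId.systemDefault()).toLocalDateTime();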
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAO.java
index d409823..f28539e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAO.java
@@ -24,11 +24,30 @@
 import java.util.List;
 import java.util.Optional;
 
+/**
+ * Specifies the behaviour of objects that retrieve, update and remove endpoint
+ * entities from the database according to the passed fields, i.e. name, url, status.
+ */
 public interface EndpointDAO {
 	List<EndpointDTO> getEndpoints();
+
+	List<EndpointDTO> getEndpointsWithStatus(String status);
+
+	/**
+	 * Retrieves the Endpoint entity with the given name.
+	 *
+	 * @param name the endpoint name
+	 * @return an Optional with the endpoint, or empty if none matches
+	 */
 	Optional<EndpointDTO> get(String name);
 
+	/**
+	 * Retrieves the Endpoint entity with the given URL.
+	 *
+	 * @param url the endpoint URL
+	 * @return an Optional with the endpoint, or empty if none matches
+	 */
+	Optional<EndpointDTO> getEndpointWithUrl(String url);
+
 	void create(EndpointDTO endpointDTO);
 
+	void updateEndpointStatus(String name, String status);
+
 	void remove(String name);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAOImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAOImpl.java
index 16a0408..aec56ec 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAOImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EndpointDAOImpl.java
@@ -20,16 +20,23 @@
 package com.epam.dlab.backendapi.dao;
 
 import com.epam.dlab.backendapi.domain.EndpointDTO;
+import org.bson.Document;
 import org.bson.conversions.Bson;
 
 import java.util.List;
 import java.util.Optional;
+import java.util.regex.Pattern;
 
 import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.regex;
+
 
 public class EndpointDAOImpl extends BaseDAO implements EndpointDAO {
 
 	private static final String ENDPOINTS_COLLECTION = "endpoints";
+	private static final String ENDPOINT_NAME_FIELD = "name";
+	private static final String ENDPOINT_STATUS_FIELD = "status";
+	private static final String ENDPOINT_URL_FIELD = "url";
 
 	@Override
 	public List<EndpointDTO> getEndpoints() {
@@ -37,6 +44,16 @@
 	}
 
 	@Override
+	public List<EndpointDTO> getEndpointsWithStatus(String status) {
+		return find(ENDPOINTS_COLLECTION, endpointStatusCondition(status), EndpointDTO.class);
+	}
+
+	@Override
+	public Optional<EndpointDTO> getEndpointWithUrl(String url) {
+		return findOne(ENDPOINTS_COLLECTION, endpointUrlCondition(url), EndpointDTO.class);
+	}
+
+	@Override
 	public Optional<EndpointDTO> get(String name) {
 		return findOne(ENDPOINTS_COLLECTION, endpointCondition(name), EndpointDTO.class);
 	}
@@ -47,11 +64,27 @@
 	}
 
 	@Override
+	public void updateEndpointStatus(String name, String status) {
+		final Document updatedField = new Document(ENDPOINT_STATUS_FIELD, status);
+		updateOne(ENDPOINTS_COLLECTION, endpointCondition(name), new Document(SET, updatedField));
+	}
+
+	@Override
 	public void remove(String name) {
 		deleteOne(ENDPOINTS_COLLECTION, endpointCondition(name));
 	}
 
 	private Bson endpointCondition(String name) {
-		return eq("name", name);
+		Pattern endPointName = Pattern.compile("^" + name + "$", Pattern.CASE_INSENSITIVE);
+		return regex(ENDPOINT_NAME_FIELD, endPointName);
+	}
+
+	private Bson endpointUrlCondition(String url) {
+		Pattern endPointUrl = Pattern.compile("^" + url + "$", Pattern.CASE_INSENSITIVE);
+		return regex(ENDPOINT_URL_FIELD, endPointUrl);
+	}
+
+	private Bson endpointStatusCondition(String status) {
+		return eq(ENDPOINT_STATUS_FIELD, status);
 	}
 }
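
The "^name$" pattern with CASE_INSENSITIVE gives a case-insensitive exact match, but the raw name is spliced straight into the regex, so metacharacters in an endpoint name would be read as pattern syntax. If literal matching is ever needed, Pattern.quote() is the usual fix; a sketch of that variant:

    private Bson endpointCondition(String name) {
        // Pattern.quote() makes the user-supplied name match literally
        Pattern exact = Pattern.compile("^" + Pattern.quote(name) + "$", Pattern.CASE_INSENSITIVE);
        return regex(ENDPOINT_NAME_FIELD, exact);
    }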
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
index bbe0a2f..f554873 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
@@ -22,11 +22,7 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.SelfServiceApplication;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.domain.EnvStatusListener;
 import com.epam.dlab.backendapi.resources.aws.ComputationalResourceAws;
-import com.epam.dlab.backendapi.resources.dto.HealthStatusEnum;
-import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
-import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.base.DataEngineType;
@@ -42,18 +38,39 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
 
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_EDGE;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
-import static com.mongodb.client.model.Filters.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.not;
+import static com.mongodb.client.model.Filters.or;
 import static com.mongodb.client.model.Projections.elemMatch;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static java.util.Objects.nonNull;
 
 /**
@@ -147,31 +164,20 @@
 	}
 
 	/**
-	 * @param user       the name of user.
-	 * @param fullReport return full report if <b>true</b> otherwise common status only.
-	 * @throws DlabException in case of any exception
-	 */
-	public HealthStatusPageDTO getHealthStatusPageDTO(String user, boolean fullReport) {
-		return new HealthStatusPageDTO()
-				.withStatus(HealthStatusEnum.OK)
-				.withListResources(Collections.emptyList());
-	}
-
-
-	/**
 	 * Updates the status of exploratory and computational for user.
 	 *
-	 * @param user the name of user.
-	 * @param list the status of node.
+	 * @param user    the name of user.
+	 * @param project name of project.
+	 * @param list    the list of environment resources with their statuses.
 	 */
-	public void updateEnvStatus(String user, EnvResourceList list) {
+	public void updateEnvStatus(String user, String project, EnvResourceList list) {
 		if (list != null && notEmpty(list.getHostList())) {
 			updateEdgeStatus(user, list.getHostList());
 			if (!list.getHostList().isEmpty()) {
 				stream(find(USER_INSTANCES, eq(USER, user),
 						fields(INCLUDE_EXP_UPDATE_FIELDS, excludeId())))
 						.filter(this::instanceIdPresent)
-						.forEach(exp -> updateUserResourceStatuses(user, list, exp));
+						.forEach(exp -> updateUserResourceStatuses(user, project, list, exp));
 			}
 		}
 	}
@@ -191,32 +197,26 @@
 				.collect(Collectors.toSet());
 	}
 
-	public Set<String> fetchAllUsers() {
-		return stream(find(USER_EDGE)).map(d -> d.getString(ID))
-				.collect(Collectors.toSet());
-	}
-
 	@SuppressWarnings("unchecked")
-	private void updateUserResourceStatuses(String user, EnvResourceList list, Document exp) {
+	private void updateUserResourceStatuses(String user, String project, EnvResourceList list, Document exp) {
 		final String exploratoryName = exp.getString(EXPLORATORY_NAME);
 		getEnvResourceAndRemove(list.getHostList(), exp.getString(INSTANCE_ID))
-				.ifPresent(resource -> updateExploratoryStatus(user, exploratoryName, exp.getString(STATUS),
-						resource.getStatus()));
+				.ifPresent(resource -> updateExploratoryStatus(user, project, exploratoryName,
+						exp.getString(STATUS), resource.getStatus()));
 
 		(getComputationalResources(exp))
 				.stream()
 				.filter(this::instanceIdPresent)
-				.forEach(comp -> updateComputational(user, list, exploratoryName, comp));
+				.forEach(comp -> updateComputational(user, project, list, exploratoryName, comp));
 	}
 
-	private void updateComputational(String user, EnvResourceList list, String exploratoryName, Document comp) {
+	private void updateComputational(String user, String project, EnvResourceList list, String exploratoryName, Document comp) {
 		final List<EnvResource> listToCheck = DataEngineType.CLOUD_SERVICE ==
 				DataEngineType.fromDockerImageName(comp.getString(IMAGE)) ?
 				list.getClusterList() : list.getHostList();
 		getEnvResourceAndRemove(listToCheck, comp.getString(INSTANCE_ID))
-				.ifPresent(resource -> updateComputationalStatus(user, exploratoryName,
-						comp.getString(ComputationalDAO.COMPUTATIONAL_NAME), comp.getString(STATUS), resource
-								.getStatus()));
+				.ifPresent(resource -> updateComputationalStatus(user, project, exploratoryName,
+						comp.getString(ComputationalDAO.COMPUTATIONAL_NAME), comp.getString(STATUS), resource.getStatus()));
 	}
 
 	private boolean instanceIdPresent(Document d) {
@@ -341,11 +341,12 @@
 	 * Update the status of exploratory if it needed.
 	 *
 	 * @param user            the user name
+	 * @param project         project name.
 	 * @param exploratoryName the name of exploratory
 	 * @param oldStatus       old status
 	 * @param newStatus       new status
 	 */
-	private void updateExploratoryStatus(String user, String exploratoryName,
+	private void updateExploratoryStatus(String user, String project, String exploratoryName,
 										 String oldStatus, String newStatus) {
 		LOGGER.trace("Update exploratory status for user {} with exploratory {} from {} to {}", user, exploratoryName,
 				oldStatus, newStatus);
@@ -358,7 +359,7 @@
 			LOGGER.debug("Exploratory status for user {} with exploratory {} will be updated from {} to {}", user,
 					exploratoryName, oldStatus, status);
 			updateOne(USER_INSTANCES,
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					Updates.set(STATUS, status.toString()));
 		}
 	}
@@ -401,12 +402,13 @@
 	 * Update the status of exploratory if it needed.
 	 *
 	 * @param user              the user name.
+	 * @param project           project name.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName the name of computational.
 	 * @param oldStatus         old status.
 	 * @param newStatus         new status.
 	 */
-	private void updateComputationalStatus(String user, String exploratoryName, String computationalName,
+	private void updateComputationalStatus(String user, String project, String exploratoryName, String computationalName,
 										   String oldStatus, String newStatus) {
 		LOGGER.trace("Update computational status for user {} with exploratory {} and computational {} from {} to {}",
 				user, exploratoryName, computationalName, oldStatus, newStatus);
@@ -421,13 +423,13 @@
 			LOGGER.debug("Computational status for user {} with exploratory {} and computational {} will be updated " +
 							"from {} to {}",
 					user, exploratoryName, computationalName, oldStatus, status);
-			if (configuration.getCloudProvider() == CloudProvider.AWS && status == UserInstanceStatus.TERMINATED &&
-					terminateComputationalSpot(user, exploratoryName, computationalName)) {
+			if (status == UserInstanceStatus.TERMINATED &&
+					terminateComputationalSpot(user, project, exploratoryName, computationalName)) {
 				return;
 			}
 			Document values = new Document(COMPUTATIONAL_STATUS_FILTER, status.toString());
 			updateOne(USER_INSTANCES,
-					and(exploratoryCondition(user, exploratoryName),
+					and(exploratoryCondition(user, exploratoryName, project),
 							elemMatch(COMPUTATIONAL_RESOURCES,
 									and(eq(ComputationalDAO.COMPUTATIONAL_NAME, computationalName))
 							)
@@ -440,15 +442,16 @@
 	 * Terminate EMR if it is spot.
 	 *
 	 * @param user              the user name.
+	 * @param project           name of project.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName the name of computational.
 	 * @return <b>true</b> if computational is spot and should be terminated by docker, otherwise <b>false</b>.
 	 */
-	private boolean terminateComputationalSpot(String user, String exploratoryName, String computationalName) {
+	private boolean terminateComputationalSpot(String user, String project, String exploratoryName, String computationalName) {
 		LOGGER.trace("Check computatation is spot for user {} with exploratory {} and computational {}", user,
 				exploratoryName, computationalName);
 		Document doc = findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				and(elemMatch(COMPUTATIONAL_RESOURCES,
 						and(eq(ComputationalDAO.COMPUTATIONAL_NAME, computationalName),
 								eq(COMPUTATIONAL_SPOT, true),
@@ -459,9 +462,7 @@
 			return false;
 		}
 
-		EnvStatusListener envStatusListener =
-				SelfServiceApplication.getInjector().getInstance(EnvStatusListener.class);
-		UserInfo userInfo = (envStatusListener != null) ? envStatusListener.getSession(user) : null;
+		UserInfo userInfo = null;
 		if (userInfo == null) {
 			// User logged off. Computational will be terminated when user logged in.
 			return true;
@@ -475,7 +476,7 @@
 			ComputationalResourceAws computational = new ComputationalResourceAws();
 			SelfServiceApplication.getInjector().injectMembers(computational);
 			UserInfo ui = new UserInfo(user, accessToken);
-			computational.terminate(ui, exploratoryName, computationalName);
+			computational.terminate(ui, project, exploratoryName, computationalName);
 		} catch (Exception e) {
 			// Cannot terminate EMR, just update status to terminated
 			LOGGER.warn("Can't terminate computational for user {} with exploratory {} and computational {}. {}",
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
index ca11b9d..fc44569 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
@@ -21,7 +21,11 @@
 
 
 import com.epam.dlab.backendapi.util.DateRemoverUtil;
-import com.epam.dlab.dto.*;
+import com.epam.dlab.dto.ResourceURL;
+import com.epam.dlab.dto.SchedulerJobDTO;
+import com.epam.dlab.dto.StatusEnvBaseDTO;
+import com.epam.dlab.dto.UserInstanceDTO;
+import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.dto.exploratory.ExploratoryStatusDTO;
 import com.epam.dlab.exceptions.DlabException;
@@ -35,16 +39,28 @@
 
 import java.time.LocalDateTime;
 import java.time.ZoneId;
-import java.util.*;
+import java.util.Collections;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
 import java.util.stream.Collectors;
 
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
 import static com.epam.dlab.backendapi.dao.SchedulerJobDAO.SCHEDULER_DATA;
-import static com.mongodb.client.model.Filters.*;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.not;
+import static com.mongodb.client.model.Filters.or;
+import static com.mongodb.client.model.Projections.exclude;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static com.mongodb.client.model.Updates.set;
 import static java.util.stream.Collectors.toList;
-import static org.apache.commons.lang3.StringUtils.EMPTY;
 
 /**
  * DAO for user exploratory.
@@ -68,27 +84,24 @@
 	public static final String EXPLORATORY_NOT_FOUND_MSG = "Exploratory for user %s with name %s not found";
 	private static final String EXPLORATORY_LAST_ACTIVITY = "last_activity";
 	private static final String PROJECT = "project";
+	private static final String ENDPOINT = "endpoint";
 
 	public ExploratoryDAO() {
 		log.info("{} is initialized", getClass().getSimpleName());
 	}
 
-	static Bson exploratoryCondition(String user, String exploratoryName) {
-		return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName));
+	static Bson exploratoryCondition(String user, String exploratoryName, String project) {
+		return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName), eq(PROJECT, project));
 	}
 
-	private Bson exploratoryStatusCondition(String user, UserInstanceStatus... exploratoryStatuses) {
-		return and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses)));
-	}
-
-	private static Bson runningExploratoryCondition(String user, String exploratoryName) {
-		return and(eq(USER, user),
+	private static Bson runningExploratoryCondition(String user, String exploratoryName, String project) {
+		return and(eq(USER, user), eq(PROJECT, project),
 				and(eq(EXPLORATORY_NAME, exploratoryName), eq(STATUS, UserInstanceStatus.RUNNING.toString())));
 	}
 
-	static Bson runningExploratoryAndComputationalCondition(String user, String exploratoryName, String
-			computationalName) {
-		return and(eq(USER, user),
+	static Bson runningExploratoryAndComputationalCondition(String user, String project, String exploratoryName,
+															String computationalName) {
+		return and(eq(USER, user), eq(PROJECT, project),
 				and(eq(EXPLORATORY_NAME, exploratoryName), eq(STATUS, UserInstanceStatus.RUNNING.toString()),
 						eq(COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_NAME, computationalName),
 						eq(COMPUTATIONAL_RESOURCES + "." + STATUS, UserInstanceStatus.RUNNING.toString())));
@@ -102,23 +115,8 @@
 	 */
 	public Iterable<Document> findExploratory(String user) {
 		return find(USER_INSTANCES, eq(USER, user),
-				fields(exclude(ExploratoryLibDAO.EXPLORATORY_LIBS,
-						ExploratoryLibDAO.COMPUTATIONAL_LIBS,
-						SCHEDULER_DATA)));
-	}
-
-	/**
-	 * Finds and returns the unique id for exploratory.
-	 *
-	 * @param user            user name.
-	 * @param exploratoryName the name of exploratory.
-	 */
-	public String fetchExploratoryId(String user, String exploratoryName) {
-		return findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
-				fields(include(EXPLORATORY_ID), excludeId()))
-				.orElse(new Document())
-				.getOrDefault(EXPLORATORY_ID, EMPTY).toString();
+				fields(exclude(ExploratoryLibDAO.EXPLORATORY_LIBS, ExploratoryLibDAO.COMPUTATIONAL_LIBS, SCHEDULER_DATA,
+						EXPLORATORY_USER, EXPLORATORY_PASS)));
 	}
 
 	/**
@@ -134,13 +132,26 @@
 		return getUserInstances(and(eq(PROJECT, project), eq(STATUS, UserInstanceStatus.RUNNING.toString())), false);
 	}
 
-	/**
-	 * Finds and returns the info of all user's notebooks whose status is present among predefined ones.
-	 *
-	 * @param user                        user name.
-	 * @param computationalFieldsRequired true/false.
-	 * @param statuses                    array of statuses.
-	 */
+	public List<UserInstanceDTO> fetchRunningExploratoryFieldsForProject(String project, List<String> endpoints) {
+		return getUserInstances(and(eq(PROJECT, project), eq(STATUS, UserInstanceStatus.RUNNING.toString()), in(ENDPOINT, endpoints)), false);
+	}
+
+	public List<UserInstanceDTO> fetchExploratoryFieldsForProject(String project) {
+		return getUserInstances(and(eq(PROJECT, project)), false);
+	}
+
+	public List<UserInstanceDTO> fetchExploratoryFieldsForProjectWithComp(String project) {
+		return getUserInstances(and(eq(PROJECT, project)), true);
+	}
+
+	public List<UserInstanceDTO> fetchExploratoryFieldsForProjectWithComp(List<String> projects) {
+		return getUserInstances(and(in(PROJECT, projects)), true);
+	}
+
+	public List<UserInstanceDTO> findExploratories(String project, String endpoint, String user) {
+		return getUserInstances(and(eq(PROJECT, project), eq(ENDPOINT, endpoint), eq(USER, user)), true);
+	}
+
 	public List<UserInstanceDTO> fetchUserExploratoriesWhereStatusIn(String user, boolean computationalFieldsRequired,
 																	 UserInstanceStatus... statuses) {
 		final List<String> statusList = statusList(statuses);
@@ -188,18 +199,17 @@
 				false);
 	}
 
-	/**
-	 * Finds and returns the info of all user's notebooks whose status is absent among predefined ones.
-	 *
-	 * @param user     user name.
-	 * @param statuses array of statuses.
-	 */
-	public List<UserInstanceDTO> fetchUserExploratoriesWhereStatusNotIn(String user, UserInstanceStatus... statuses) {
-		final List<String> statusList = statusList(statuses);
+	public List<UserInstanceDTO> fetchProjectEndpointExploratoriesWhereStatusIn(String project, List<String> endpoints,
+																				List<UserInstanceStatus> exploratoryStatuses,
+																				UserInstanceStatus... computationalStatuses) {
+		final List<String> exploratoryStatusList = statusList(exploratoryStatuses);
+		final List<String> computationalStatusList = statusList(computationalStatuses);
 		return getUserInstances(
 				and(
-						eq(USER, user),
-						not(in(STATUS, statusList))
+						eq(PROJECT, project),
+						in(ENDPOINT, endpoints),
+						or(in(STATUS, exploratoryStatusList),
+								in(COMPUTATIONAL_RESOURCES + "." + STATUS, computationalStatusList))
 				),
 				false);
 	}
@@ -210,12 +220,24 @@
 		return getUserInstances(
 				and(
 						eq(PROJECT, project),
-						eq("endpoint", endpoint),
+						eq(ENDPOINT, endpoint),
 						not(in(STATUS, statusList))
 				),
 				false);
 	}
 
+	public List<UserInstanceDTO> fetchExploratoriesByEndpointWhereStatusNotIn(String endpoint,
+																			  List<UserInstanceStatus> statuses) {
+		final List<String> exploratoryStatusList = statusList(statuses);
+
+		return getUserInstances(
+				and(
+						eq(ENDPOINT, endpoint),
+						not(in(STATUS, exploratoryStatusList))
+				),
+				false);
+	}
+
 	private List<UserInstanceDTO> getUserInstances(Bson condition, boolean computationalFieldsRequired) {
 		return stream(getCollection(USER_INSTANCES)
 				.find(condition)
@@ -247,35 +269,25 @@
 	 * Finds and returns the info of exploratory (without info about computational resources).
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName the name of exploratory.
 	 */
-	public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName) {
-		return getExploratory(user, exploratoryName, false).orElseThrow(() ->
+	public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName) {
+		return getExploratory(user, project, exploratoryName, false).orElseThrow(() ->
 				new ResourceNotFoundException(String.format(EXPLORATORY_NOT_FOUND_MSG, user, exploratoryName)));
 
 	}
 
-	public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName,
-												  boolean includeComputationalResources) {
-		return getExploratory(user, exploratoryName, includeComputationalResources).orElseThrow(() ->
+	public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName, boolean includeCompResources) {
+		return getExploratory(user, project, exploratoryName, includeCompResources).orElseThrow(() ->
 				new ResourceNotFoundException(String.format(EXPLORATORY_NOT_FOUND_MSG, user, exploratoryName)));
 
 	}
 
-	/**
-	 * Checks if exploratory exists.
-	 *
-	 * @param user            user name.
-	 * @param exploratoryName the name of exploratory.
-	 */
-	public boolean isExploratoryExist(String user, String exploratoryName) {
-		return getExploratory(user, exploratoryName, false).isPresent();
-	}
-
-	private Optional<UserInstanceDTO> getExploratory(String user, String exploratoryName,
+	private Optional<UserInstanceDTO> getExploratory(String user, String project, String exploratoryName,
 													 boolean includeCompResources) {
 		return findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				includeCompResources ? null : fields(exclude(COMPUTATIONAL_RESOURCES)),
 				UserInstanceDTO.class);
 	}
@@ -284,12 +296,13 @@
 	 * Finds and returns the info of running exploratory with running cluster.
 	 *
 	 * @param user              user name.
+	 * @param project           name of project.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of cluster
 	 */
-	public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName, String computationalName) {
+	public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName, String computationalName) {
 		return findOne(USER_INSTANCES,
-				runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+				runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 				UserInstanceDTO.class)
 				.orElseThrow(() -> new DlabException(String.format("Running notebook %s with running cluster %s not " +
 								"found for user %s",
@@ -300,10 +313,11 @@
 	 * Finds and returns the info of running exploratory.
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 */
-	public UserInstanceDTO fetchRunningExploratoryFields(String user, String exploratoryName) {
-		return findOne(USER_INSTANCES, runningExploratoryCondition(user, exploratoryName),
+	public UserInstanceDTO fetchRunningExploratoryFields(String user, String project, String exploratoryName) {
+		return findOne(USER_INSTANCES, runningExploratoryCondition(user, exploratoryName, project),
 				fields(exclude(COMPUTATIONAL_RESOURCES)), UserInstanceDTO.class)
 				.orElseThrow(() -> new DlabException(
 						String.format("Running exploratory instance for user %s with name %s not found.",
@@ -327,34 +341,22 @@
 	 */
 	public UpdateResult updateExploratoryStatus(StatusEnvBaseDTO<?> dto) {
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+				exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 				set(STATUS, dto.getStatus()));
 	}
 
 	/**
-	 * Updates the status for all user's corresponding exploratories in Mongo database.
-	 *
-	 * @param newExploratoryStatus   new status for exploratories.
-	 * @param user                   user name.
-	 * @param oldExploratoryStatuses old statuses of exploratories.
-	 */
-	public void updateStatusForExploratories(UserInstanceStatus newExploratoryStatus, String user,
-											 UserInstanceStatus... oldExploratoryStatuses) {
-		updateMany(USER_INSTANCES, exploratoryStatusCondition(user, oldExploratoryStatuses),
-				set(STATUS, newExploratoryStatus.toString()));
-	}
-
-	/**
 	 * Updates status for single exploratory in Mongo database.
 	 *
 	 * @param user            user.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 * @param newStatus       new status of exploratory.
 	 * @return The result of an update operation.
 	 */
-	public UpdateResult updateStatusForExploratory(String user, String exploratoryName, UserInstanceStatus newStatus) {
+	public UpdateResult updateStatusForExploratory(String user, String project, String exploratoryName, UserInstanceStatus newStatus) {
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				set(STATUS, newStatus.toString()));
 	}
 
@@ -362,40 +364,29 @@
 	 * Updates the scheduler's data for exploratory in Mongo database.
 	 *
 	 * @param user            user.
+	 * @param project         name of project.
 	 * @param exploratoryName name of exploratory.
 	 * @param dto             object of scheduler data.
 	 * @return The result of an update operation.
 	 */
-	public UpdateResult updateSchedulerDataForUserAndExploratory(String user, String exploratoryName,
+	public UpdateResult updateSchedulerDataForUserAndExploratory(String user, String project, String exploratoryName,
 																 SchedulerJobDTO dto) {
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				set(SCHEDULER_DATA, Objects.isNull(dto) ? null : convertToBson(dto)));
 	}
 
 	/**
-	 * Updates the requirement for reuploading key for all user's corresponding exploratories in Mongo database.
-	 *
-	 * @param user                user name.
-	 * @param reuploadKeyRequired true/false.
-	 * @param exploratoryStatuses statuses of exploratory.
-	 */
-	public void updateReuploadKeyForExploratories(String user, boolean reuploadKeyRequired,
-												  UserInstanceStatus... exploratoryStatuses) {
-		updateMany(USER_INSTANCES, exploratoryStatusCondition(user, exploratoryStatuses),
-				set(REUPLOAD_KEY_REQUIRED, reuploadKeyRequired));
-	}
-
-	/**
 	 * Updates the requirement for reuploading key for single exploratory in Mongo database.
 	 *
 	 * @param user                user name.
+	 * @param project             project name.
 	 * @param exploratoryName     exploratory's name
 	 * @param reuploadKeyRequired true/false.
 	 */
-	public void updateReuploadKeyForExploratory(String user, String exploratoryName, boolean reuploadKeyRequired) {
+	public void updateReuploadKeyForExploratory(String user, String project, String exploratoryName, boolean reuploadKeyRequired) {
 		updateOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				set(REUPLOAD_KEY_REQUIRED, reuploadKeyRequired));
 	}
 
@@ -433,7 +424,7 @@
 							}
 					).collect(Collectors.toList()));
 		} else if (dto.getPrivateIp() != null) {
-			UserInstanceDTO inst = fetchExploratoryFields(dto.getUser(), dto.getExploratoryName());
+			UserInstanceDTO inst = fetchExploratoryFields(dto.getUser(), dto.getProject(), dto.getExploratoryName());
 			if (!inst.getPrivateIp().equals(dto.getPrivateIp()) && inst.getResourceUrl() != null) {
 				values.append(EXPLORATORY_URL, inst.getResourceUrl().stream()
 						.map(url -> replaceIp(dto.getPrivateIp(), inst, url))
@@ -454,13 +445,13 @@
 			values.append(CLUSTER_CONFIG, dto.getConfig().stream().map(this::convertToBson).collect(toList()));
 		}
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+				exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 				new Document(SET, values));
 	}
 
-	public void updateExploratoryIp(String user, String ip, String exploratoryName) {
+	public void updateExploratoryIp(String user, String project, String ip, String exploratoryName) {
 
-		UserInstanceDTO inst = fetchExploratoryFields(user, exploratoryName);
+		UserInstanceDTO inst = fetchExploratoryFields(user, project, exploratoryName);
 		if (!inst.getPrivateIp().equals(ip)) {
 			Document values = new Document();
 			values.append(EXPLORATORY_PRIVATE_IP, ip);
@@ -471,15 +462,15 @@
 			}
 
 			updateOne(USER_INSTANCES,
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					new Document(SET, values));
 		}
 
 	}
 
 	@SuppressWarnings("unchecked")
-	public List<ClusterConfig> getClusterConfig(String user, String exploratoryName) {
-		return findOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName), notNull(CLUSTER_CONFIG)),
+	public List<ClusterConfig> getClusterConfig(String user, String project, String exploratoryName) {
+		return findOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project), notNull(CLUSTER_CONFIG)),
 				fields(include(CLUSTER_CONFIG), excludeId()))
 				.map(d -> convertFromDocument((List<Document>) d.get(CLUSTER_CONFIG),
 						new TypeReference<List<ClusterConfig>>() {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
index ebdd028..bcec258 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
@@ -38,11 +38,16 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.runningExploratoryAndComputationalCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
 import static com.mongodb.client.model.Filters.and;
 import static com.mongodb.client.model.Filters.eq;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.elemMatch;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static com.mongodb.client.model.Updates.push;
 
 /**
@@ -98,17 +103,17 @@
 		return COMPUTATIONAL_LIBS + "." + computational + FIELD_SET_DELIMETER + fieldName;
 	}
 
-	private Document findLibraries(String user, String exploratoryName, Bson include) {
+	private Document findLibraries(String user, String project, String exploratoryName, Bson include) {
 		Optional<Document> opt = findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				fields(excludeId(), include));
 
 		return opt.orElseGet(Document::new);
 
 	}
 
-	public List<Library> getLibraries(String user, String exploratoryName) {
-		final Document libsDocument = findAllLibraries(user, exploratoryName);
+	public List<Library> getLibraries(String user, String project, String exploratoryName) {
+		final Document libsDocument = findAllLibraries(user, project, exploratoryName);
 		return Stream
 				.concat(
 						libraryStream(libsDocument, exploratoryName, EXPLORATORY_LIBS, ResourceType.EXPLORATORY),
@@ -116,24 +121,23 @@
 				.collect(Collectors.toList());
 	}
 
-	public Document findAllLibraries(String user, String exploratoryName) {
-		return findLibraries(user, exploratoryName, include(EXPLORATORY_LIBS, COMPUTATIONAL_LIBS,
+	public Document findAllLibraries(String user, String project, String exploratoryName) {
+		return findLibraries(user, project, exploratoryName, include(EXPLORATORY_LIBS, COMPUTATIONAL_LIBS,
 				COMPUTATIONAL_RESOURCES));
 	}
 
-	public Document findExploratoryLibraries(String user, String exploratoryName) {
-		return findLibraries(user, exploratoryName, include(EXPLORATORY_LIBS));
+	public Document findExploratoryLibraries(String user, String project, String exploratoryName) {
+		return findLibraries(user, project, exploratoryName, include(EXPLORATORY_LIBS));
 	}
 
-	public Document findComputationalLibraries(String user, String exploratoryName, String computationalName) {
-		return findLibraries(user, exploratoryName, include(COMPUTATIONAL_LIBS + "." + computationalName));
+	public Document findComputationalLibraries(String user, String project, String exploratoryName, String computationalName) {
+		return findLibraries(user, project, exploratoryName, include(COMPUTATIONAL_LIBS + "." + computationalName));
 	}
 
 	@SuppressWarnings("unchecked")
-	public Library getLibrary(String user, String exploratoryName,
-							  String libraryGroup, String libraryName) {
+	public Library getLibrary(String user, String project, String exploratoryName, String libraryGroup, String libraryName) {
 		Optional<Document> userInstance = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						elemMatch(EXPLORATORY_LIBS,
 								and(eq(LIB_GROUP, libraryGroup), eq(LIB_NAME, libraryName))
 						)),
@@ -153,10 +157,10 @@
 	}
 
 	@SuppressWarnings("unchecked")
-	public Library getLibrary(String user, String exploratoryName, String computationalName,
+	public Library getLibrary(String user, String project, String exploratoryName, String computationalName,
 							  String libraryGroup, String libraryName) {
 		Optional<Document> libraryStatus = findOne(USER_INSTANCES,
-				and(runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+				and(runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 						libraryConditionComputational(computationalName, libraryGroup, libraryName)
 				),
 
@@ -184,18 +188,19 @@
 	 * Add the user's library for exploratory into database.
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 * @param library         library.
 	 * @return <b>true</b> if operation was successful, otherwise <b>false</b>.
 	 */
-	public boolean addLibrary(String user, String exploratoryName, LibInstallDTO library, boolean reinstall) {
+	public boolean addLibrary(String user, String project, String exploratoryName, LibInstallDTO library, boolean reinstall) {
 		Optional<Document> opt = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						elemMatch(EXPLORATORY_LIBS,
 								and(eq(LIB_GROUP, library.getGroup()), eq(LIB_NAME, library.getName())))));
 		if (!opt.isPresent()) {
 			updateOne(USER_INSTANCES,
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					push(EXPLORATORY_LIBS, convertToBson(library)));
 			return true;
 		} else {
@@ -205,7 +210,7 @@
 				values.append(libraryFieldFilter(LIB_ERROR_MESSAGE), null);
 			}
 
-			updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName),
+			updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project),
 					elemMatch(EXPLORATORY_LIBS,
 							and(eq(LIB_GROUP, library.getGroup()), eq(LIB_NAME, library.getName())))),
 					new Document(SET, values));
@@ -217,22 +222,23 @@
 	 * Add the user's library for exploratory into database.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational.
 	 * @param library           library.
 	 * @return <b>true</b> if operation was successful, otherwise <b>false</b>.
 	 */
-	public boolean addLibrary(String user, String exploratoryName, String computationalName,
+	public boolean addLibrary(String user, String project, String exploratoryName, String computationalName,
 							  LibInstallDTO library, boolean reinstall) {
 
 		Optional<Document> opt = findOne(USER_INSTANCES,
-				and(runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+				and(runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 						eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_GROUP, library.getGroup()),
 						eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_NAME, library.getName())));
 
 		if (!opt.isPresent()) {
 			updateOne(USER_INSTANCES,
-					runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+					runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 					push(COMPUTATIONAL_LIBS + "." + computationalName, convertToBson(library)));
 			return true;
 		} else {
@@ -243,7 +249,7 @@
 			}
 
 			updateOne(USER_INSTANCES, and(
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_GROUP, library.getGroup()),
 					eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_NAME, library.getName())),
 
@@ -276,7 +282,7 @@
 				Document values = updateLibraryFields(lib, dto.getUptime());
 
 				updateOne(USER_INSTANCES,
-						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 								libraryConditionExploratory(lib.getGroup(), lib.getName())),
 						new Document(SET, values));
 			} catch (Exception e) {
@@ -292,7 +298,7 @@
 				Document values = updateComputationalLibraryFields(dto.getComputationalName(), lib, dto.getUptime());
 
 				updateOne(USER_INSTANCES,
-						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 								elemMatch(COMPUTATIONAL_LIBS + "." + dto.getComputationalName(),
 										libCondition(lib.getGroup(), lib.getName()))),
 						new Document(SET, values));
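
Note: the library lookups above depend on elemMatch semantics: both predicates must hold on the same element of the libraries array. A minimal sketch of the distinction, with hypothetical field names:

import static com.mongodb.client.model.Filters.and;
import static com.mongodb.client.model.Filters.elemMatch;
import static com.mongodb.client.model.Filters.eq;

import org.bson.conversions.Bson;

final class LibraryLookupSketch {
	// Matches only if one element of "libs" carries BOTH the group and the
	// name; and(eq("libs.group", g), eq("libs.name", n)) would also match a
	// document where the group and the name live in two different elements.
	static Bson libraryCondition(String group, String name) {
		return elemMatch("libs", and(eq("group", group), eq("name", name)));
	}
}
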
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDao.java
index 0eb86c0..2665d47 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDao.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDao.java
@@ -22,7 +22,6 @@
 import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
 import com.epam.dlab.dto.exploratory.ImageStatus;
 import com.epam.dlab.dto.exploratory.LibStatus;
-import com.epam.dlab.model.ResourceType;
 import com.epam.dlab.model.exploratory.Image;
 import com.epam.dlab.model.library.Library;
 
@@ -41,7 +40,7 @@
 
 	List<ImageInfoRecord> getImagesForProject(String project);
 
-	Optional<ImageInfoRecord> getImage(String user, String name);
+	Optional<ImageInfoRecord> getImage(String user, String name, String project, String endpoint);
 
-	List<Library> getLibraries(String user, String imageFullName, ResourceType resourceType, LibStatus status);
+	List<Library> getLibraries(String user, String imageFullName, String project, String endpoint, LibStatus status);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDaoImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDaoImpl.java
index c3e0ee1..808dfbe 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDaoImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ImageExploratoryDaoImpl.java
@@ -22,7 +22,6 @@
 import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
 import com.epam.dlab.dto.exploratory.ImageStatus;
 import com.epam.dlab.dto.exploratory.LibStatus;
-import com.epam.dlab.model.ResourceType;
 import com.epam.dlab.model.exploratory.Image;
 import com.epam.dlab.model.library.Library;
 import com.google.inject.Singleton;
@@ -60,7 +59,7 @@
 
 	@Override
 	public void updateImageFields(Image image) {
-		final Bson condition = userImageCondition(image.getUser(), image.getName());
+		final Bson condition = userImageCondition(image.getUser(), image.getName(), image.getProject(), image.getEndpoint());
 		final Document updatedFields = getUpdatedFields(image);
 		updateOne(MongoCollections.IMAGES, condition, new Document(SET, updatedFields));
 	}
@@ -80,24 +79,24 @@
 	}
 
 	@Override
-	public Optional<ImageInfoRecord> getImage(String user, String name) {
-		return findOne(MongoCollections.IMAGES, userImageCondition(user, name), ImageInfoRecord.class);
+	public Optional<ImageInfoRecord> getImage(String user, String name, String project, String endpoint) {
+		return findOne(MongoCollections.IMAGES, userImageCondition(user, name, project, endpoint), ImageInfoRecord.class);
 	}
 
 	@Override
 	@SuppressWarnings("unchecked")
-	public List<Library> getLibraries(String user, String imageFullName, ResourceType resourceType, LibStatus
-			status) {
-		return ((List<Document>) libDocument(user, imageFullName, status)
+	public List<Library> getLibraries(String user, String imageFullName, String project, String endpoint, LibStatus status) {
+		return ((List<Document>) libDocument(user, imageFullName, project, endpoint, status)
 				.orElse(emptyLibrariesDocument()).get(LIBRARIES))
 				.stream()
 				.map(d -> convertFromDocument(d, Library.class))
 				.collect(Collectors.toList());
 	}
 
-	private Optional<Document> libDocument(String user, String imageFullName, LibStatus status) {
+	private Optional<Document> libDocument(String user, String imageFullName, String project, String endpoint,
+										   LibStatus status) {
 		return findOne(MongoCollections.IMAGES,
-				imageLibraryCondition(user, imageFullName, status),
+				imageLibraryCondition(user, imageFullName, project, endpoint, status),
 				fields(include(LIBRARIES), excludeId()));
 	}
 
@@ -105,8 +104,9 @@
 		return new Document(LIBRARIES, Collections.emptyList());
 	}
 
-	private Bson imageLibraryCondition(String user, String imageFullName, LibStatus status) {
-		return and(eq(USER, user), eq(IMAGE_NAME, imageFullName),
+	private Bson imageLibraryCondition(String user, String imageFullName, String project, String endpoint,
+									   LibStatus status) {
+		return and(eq(USER, user), eq(IMAGE_NAME, imageFullName), eq(PROJECT, project), eq(ENDPOINT, endpoint),
 				elemMatch(LIBRARIES, eq(STATUS, status.name())));
 	}
 
@@ -130,8 +130,8 @@
 	}
 
 
-	private Bson userImageCondition(String user, String imageName) {
-		return and(eq(USER, user), eq(IMAGE_NAME, imageName));
+	private Bson userImageCondition(String user, String imageName, String project, String endpoint) {
+		return and(eq(USER, user), eq(IMAGE_NAME, imageName), eq(PROJECT, project), eq(ENDPOINT, endpoint));
 	}
 
 	private Bson imageProjectCondition(String image, String project) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
index 3d3fb36..f6e8bb6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
@@ -19,8 +19,8 @@
 
 package com.epam.dlab.backendapi.dao;
 
-import com.mongodb.BasicDBObject;
 import com.mongodb.client.model.IndexOptions;
+import com.mongodb.client.model.Indexes;
 import io.dropwizard.lifecycle.Managed;
 
 import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
@@ -28,12 +28,11 @@
 
 /** Creates the indexes for mongo collections. */
 public class IndexCreator extends BaseDAO implements Managed {
+    private static final String PROJECT_FIELD = "project";
     @Override
 	public void start() {
         mongoService.getCollection(USER_INSTANCES)
-        		.createIndex(new BasicDBObject(USER, 1)
-        		.append(EXPLORATORY_NAME, 2),
-                new IndexOptions().unique(true));
+                .createIndex(Indexes.ascending(USER, EXPLORATORY_NAME, PROJECT_FIELD), new IndexOptions().unique(true));
         // TODO: Make refactoring and append indexes for other mongo collections
     }
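
Note: besides adding the project field, the index rewrite above removes a questionable detail: the old BasicDBObject appended EXPLORATORY_NAME with direction 2 rather than 1, a value servers at best tolerate as "ascending", whereas Indexes.ascending cannot express an invalid direction. A sketch of the resulting compound unique index:

import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.IndexOptions;
import com.mongodb.client.model.Indexes;

import org.bson.Document;

final class UniqueIndexSketch {
	static void createIndex(MongoCollection<Document> userInstances) {
		// One document per (user, exploratory_name, project); an insert that
		// duplicates the triple fails with a duplicate-key error.
		userInstances.createIndex(
				Indexes.ascending("user", "exploratory_name", "project"),
				new IndexOptions().unique(true));
	}
}
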
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/KeyDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/KeyDAO.java
deleted file mode 100644
index 8bce841..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/KeyDAO.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao;
-
-
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.dto.keyload.KeyLoadStatus;
-import com.epam.dlab.dto.keyload.UserKeyDTO;
-import com.epam.dlab.exceptions.DlabException;
-import com.mongodb.client.model.Updates;
-import org.bson.Document;
-
-import java.util.Date;
-import java.util.Optional;
-
-import static com.epam.dlab.backendapi.dao.MongoCollections.USER_EDGE;
-import static com.epam.dlab.backendapi.dao.MongoCollections.USER_KEYS;
-import static com.mongodb.client.model.Filters.*;
-import static com.mongodb.client.model.Projections.*;
-import static com.mongodb.client.model.Updates.set;
-
-/**
- * DAO for manage the user key.
- */
-public abstract class KeyDAO extends BaseDAO {
-	private static final String EDGE_STATUS = "edge_status";
-	private static final String KEY_CONTENT = "content";
-
-	/**
-	 * Write the status of user key to Mongo database.
-	 *
-	 * @param user   user name
-	 * @param status the status of user key.
-	 */
-	public void updateKey(String user, String status) {
-		updateOne(USER_KEYS, eq(ID, user), set(STATUS, status));
-	}
-
-	/**
-	 * Delete the user key from Mongo database.
-	 *
-	 * @param user user name
-	 */
-	public void deleteKey(String user) {
-		mongoService.getCollection(USER_KEYS).deleteOne(eq(ID, user));
-	}
-
-	/**
-	 * Inserts ('insertRequired' equals 'true') or updates ('insertRequired' equals 'false') the user key to/in Mongo
-	 * database.
-	 *
-	 * @param user           user name
-	 * @param content        key content
-	 * @param insertRequired true/false
-	 */
-	public void upsertKey(final String user, String content, boolean insertRequired) {
-		Document doc = new Document(SET,
-				new Document()
-						.append(ID, user)
-						.append(KEY_CONTENT, content)
-						.append(STATUS, insertRequired ? KeyLoadStatus.NEW.getStatus() :
-								KeyLoadStatus.SUCCESS.getStatus())
-						.append(TIMESTAMP, new Date()));
-		updateOne(USER_KEYS, eq(ID, user), doc, insertRequired);
-	}
-
-	/**
-	 * Finds and returns the user key with the specified status
-	 *
-	 * @param user   user name.
-	 * @param status key status
-	 */
-	public UserKeyDTO fetchKey(String user, KeyLoadStatus status) {
-		return findOne(USER_KEYS,
-				and(eq(ID, user), eq(STATUS, status.getStatus())),
-				UserKeyDTO.class)
-				.orElseThrow(() -> new DlabException(String.format("Key of user %s with status %s not found", user,
-						status.getStatus())));
-	}
-
-	/**
-	 * Store the EDGE of user to Mongo database.
-	 *
-	 * @param user     user name
-	 * @param edgeInfo the EDGE of user
-	 */
-	public void updateEdgeInfo(String user, EdgeInfo edgeInfo) {
-		Document d = new Document(SET,
-				convertToBson(edgeInfo)
-						.append(ID, user));
-		updateOne(USER_EDGE,
-				eq(ID, user),
-				d,
-				true);
-	}
-
-	public abstract EdgeInfo getEdgeInfo(String user);
-
-	public <T extends EdgeInfo> T getEdgeInfo(String user, Class<T> target, T defaultValue) {
-		return findOne(USER_EDGE,
-				eq(ID, user), target)
-				.orElse(defaultValue);
-	}
-
-	public abstract Optional<? extends EdgeInfo> getEdgeInfoWhereStatusIn(String user, UserInstanceStatus... statuses);
-
-	protected <T extends EdgeInfo> Optional<T> getEdgeInfoWhereStatusIn(String user, Class<T> target,
-																		UserInstanceStatus... statuses) {
-		return findOne(USER_EDGE,
-				and(eq(ID, user), in(EDGE_STATUS, statusList(statuses))),
-				target);
-	}
-
-	/**
-	 * Finds and returns the status of user key.
-	 *
-	 * @param user user name
-	 */
-	public KeyLoadStatus findKeyStatus(String user) {
-		return findOne(USER_KEYS, eq(ID, user), UserKeyDTO.class)
-				.map(UserKeyDTO::getStatus)
-				.map(KeyLoadStatus::findByStatus)
-				.orElse(KeyLoadStatus.NONE);
-	}
-
-	/**
-	 * Updates the status of EDGE node.
-	 *
-	 * @param user   user name
-	 * @param status status of EDGE node
-	 */
-	public void updateEdgeStatus(String user, String status) {
-		updateOne(USER_EDGE,
-				eq(ID, user),
-				Updates.set(EDGE_STATUS, status));
-	}
-
-	/**
-	 * Return the status of EDGE node.
-	 *
-	 * @param user user name
-	 */
-	public String getEdgeStatus(String user) {
-		Document d = findOne(USER_EDGE,
-				eq(ID, user),
-				fields(include(EDGE_STATUS), excludeId())).orElse(null);
-		return (d == null ? "" : d.getString(EDGE_STATUS));
-	}
-
-	public void removeEdge(String user) {
-		deleteOne(USER_EDGE, eq(ID, user));
-	}
-
-	public boolean edgeNodeExist(String user) {
-		return findOne(USER_EDGE, and(eq(ID, user), not(in(EDGE_STATUS, UserInstanceStatus.TERMINATING.toString(),
-				UserInstanceStatus.TERMINATED.toString()))))
-				.isPresent();
-	}
-
-	/**
-	 * Updates the field 'reupload_key_required' of EDGE node.
-	 *
-	 * @param user                user name
-	 * @param reuploadKeyRequired true/false
-	 * @param edgeStatuses        allowable edge statuses
-	 */
-	public void updateEdgeReuploadKey(String user, boolean reuploadKeyRequired, UserInstanceStatus... edgeStatuses) {
-		updateOne(USER_EDGE,
-				and(eq(ID, user), in(EDGE_STATUS, statusList(edgeStatuses))),
-				Updates.set(REUPLOAD_KEY_REQUIRED, reuploadKeyRequired));
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/MongoSetting.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/MongoSetting.java
index 4656b6b..bfa4e84 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/MongoSetting.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/MongoSetting.java
@@ -84,7 +84,7 @@
 	AZURE_VPC_NAME("azure_vpc_name"),
 	AZURE_SECURITY_GROUP_NAME("azure_security_group_name"),
 	AZURE_EDGE_INSTANCE_SIZE("edge_instance_size"),
-	AZURE_SSN_INSTANCE_SIZE("ssn_instance_size"),
+	SSN_INSTANCE_SIZE("ssn_instance_size"),
 	AZURE_DATA_LAKE_NAME_TAG("datalake_tag_name"),
 	AZURE_DATA_LAKE_CLIENT_ID("azure_client_id"),
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java
index e4a10b4..c94daae 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java
@@ -31,8 +31,6 @@
 public interface ProjectDAO {
 	List<ProjectDTO> getProjects();
 
-	List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status);
-
 	List<ProjectDTO> getProjectsWithEndpointStatusNotIn(UserInstanceStatus... statuses);
 
 	List<ProjectDTO> getUserProjects(UserInfo userInfo, boolean active);
@@ -47,6 +45,8 @@
 
 	Optional<ProjectDTO> get(String name);
 
+	List<ProjectDTO> getProjectsByEndpoint(String endpointName);
+
 	boolean update(ProjectDTO projectDTO);
 
 	void remove(String name);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java
index 5e79983..fc79656 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java
@@ -37,7 +37,11 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import static com.mongodb.client.model.Filters.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.elemMatch;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.not;
 
 public class ProjectDAOImpl extends BaseDAO implements ProjectDAO {
 
@@ -45,6 +49,7 @@
 	private static final String GROUPS = "groups";
 	private static final String ENDPOINTS = "endpoints";
 	private static final String STATUS_FIELD = "status";
+	private static final String SHARED_IMAGE_FIELD = "sharedImageEnabled";
 	private static final String ENDPOINT_STATUS_FIELD = "endpoints." + STATUS_FIELD;
 	private static final String EDGE_INFO_FIELD = "edgeInfo";
 	private static final String ENDPOINT_FIELD = "endpoints.$.";
@@ -64,11 +69,6 @@
 	}
 
 	@Override
-	public List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status) {
-		return find(PROJECTS_COLLECTION, eq(STATUS_FIELD, status.toString()), ProjectDTO.class);
-	}
-
-	@Override
 	public List<ProjectDTO> getProjectsWithEndpointStatusNotIn(UserInstanceStatus... statuses) {
 		final List<String> statusList =
 				Arrays.stream(statuses).map(UserInstanceStatus::name).collect(Collectors.toList());
@@ -118,11 +118,17 @@
 	}
 
 	@Override
+	public List<ProjectDTO> getProjectsByEndpoint(String endpointName) {
+		return find(PROJECTS_COLLECTION, elemMatch(ENDPOINTS, eq("name", endpointName)), ProjectDTO.class);
+	}
+
+	@Override
 	public boolean update(ProjectDTO projectDTO) {
 		BasicDBObject updateProject = new BasicDBObject();
 		updateProject.put(GROUPS, projectDTO.getGroups());
 		updateProject.put(ENDPOINTS,
 				projectDTO.getEndpoints().stream().map(this::convertToBson).collect(Collectors.toList()));
+		updateProject.put(SHARED_IMAGE_FIELD, projectDTO.isSharedImageEnabled());
 		return updateOne(PROJECTS_COLLECTION, projectCondition(projectDTO.getName()),
 				new Document(SET, updateProject)).getMatchedCount() > 0L;
 	}
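
Note: two behavioral changes hide in the ProjectDAOImpl hunks: update() now persists sharedImageEnabled alongside groups and endpoints, and getProjectsByEndpoint() filters on the nested endpoints array. A sketch of the latter, assuming each project document embeds endpoints as an array of {name, status, ...} sub-documents:

import static com.mongodb.client.model.Filters.elemMatch;
import static com.mongodb.client.model.Filters.eq;

import org.bson.conversions.Bson;

final class ProjectsByEndpointSketch {
	// Selects every project whose "endpoints" array contains an element with
	// the given name, regardless of that element's status.
	static Bson byEndpoint(String endpointName) {
		return elemMatch("endpoints", eq("name", endpointName));
	}
}
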
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
index db66de5..fc292dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
@@ -31,19 +31,35 @@
 import org.bson.Document;
 import org.bson.conversions.Bson;
 
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 import static com.epam.dlab.backendapi.dao.ComputationalDAO.COMPUTATIONAL_NAME;
-import static com.epam.dlab.backendapi.dao.ComputationalDAO.PROJECT;
 import static com.epam.dlab.backendapi.dao.ComputationalDAO.IMAGE;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ComputationalDAO.PROJECT;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
 import static com.epam.dlab.dto.base.DataEngineType.fromDockerImageName;
-import static com.mongodb.client.model.Filters.*;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.exists;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.lte;
+import static com.mongodb.client.model.Filters.ne;
+import static com.mongodb.client.model.Filters.or;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static java.util.stream.Collectors.toList;
 
 /**
@@ -78,12 +94,13 @@
 	 * Finds and returns the info of user's single scheduler job by exploratory name.
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName the name of exploratory.
 	 * @return scheduler job data.
 	 */
-	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobByUserAndExploratory(String user, String exploratoryName) {
+	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobByUserAndExploratory(String user, String project, String exploratoryName) {
 		return findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName), schedulerNotNullCondition()),
+				and(exploratoryCondition(user, exploratoryName, project), schedulerNotNullCondition()),
 				fields(include(SCHEDULER_DATA), excludeId()))
 				.map(d -> convertFromDocument((Document) d.get(SCHEDULER_DATA), SchedulerJobDTO.class));
 	}
@@ -92,16 +109,17 @@
 	 * Finds and returns the info of user's single scheduler job for computational resource.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName the name of computational resource.
 	 * @return scheduler job data.
 	 */
 
 	@SuppressWarnings("unchecked")
-	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobForCluster(String user, String exploratoryName,
+	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobForCluster(String user, String project, String exploratoryName,
 																	   String computationalName) {
 		return findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				fields(include(COMPUTATIONAL_RESOURCES), excludeId()))
 				.map(d -> (List<Document>) d.get(COMPUTATIONAL_RESOURCES))
 				.map(list -> list.stream().filter(d -> d.getString(COMPUTATIONAL_NAME).equals(computationalName))
@@ -140,7 +158,7 @@
 								eq(CONSIDER_INACTIVITY_FLAG, false)
 						)
 				),
-				fields(excludeId(), include(USER, EXPLORATORY_NAME, SCHEDULER_DATA))))
+				fields(excludeId(), include(USER, PROJECT, EXPLORATORY_NAME, SCHEDULER_DATA))))
 				.map(d -> convertFromDocument(d, SchedulerJobData.class))
 				.collect(toList());
 	}
@@ -176,7 +194,7 @@
 				and(schedulerNotNullCondition()));
 		return find(USER_INSTANCES,
 				and(eq(STATUS, exploratoryStatus.toString()), computationalSchedulerCondition),
-				fields(excludeId(), include(USER, EXPLORATORY_NAME, COMPUTATIONAL_RESOURCES)));
+				fields(excludeId(), include(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_RESOURCES)));
 	}
 
 	public void removeScheduler(String user, String exploratory) {
@@ -208,9 +226,9 @@
 
 	private SchedulerJobData toSchedulerData(Document userInstanceDocument, Document compResource) {
 		final String user = userInstanceDocument.getString(USER);
+		final String project = userInstanceDocument.getString(PROJECT);
 		final String exploratoryName = userInstanceDocument.getString(EXPLORATORY_NAME);
 		final String computationalName = compResource.getString(COMPUTATIONAL_NAME);
-		final String project = compResource.getString(PROJECT);
 		final SchedulerJobDTO schedulerData = convertFromDocument((Document) compResource.get(SCHEDULER_DATA),
 				SchedulerJobDTO.class);
 		return new SchedulerJobData(user, exploratoryName, computationalName, project, schedulerData);
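
Note: the scheduler hunks above relocate the project: it is now read from the parent user-instance document rather than from the nested computational resource, and PROJECT is added to both projections so the field actually arrives with the query result. A minimal sketch of the new extraction, with literal keys in place of the constants:

import org.bson.Document;

final class SchedulerDataSketch {
	// Only the cluster name still comes from the nested computational
	// resource; user, project and notebook name belong to the parent document.
	static String describe(Document userInstance, Document compResource) {
		return String.join("/",
				userInstance.getString("user"),
				userInstance.getString("project"),
				userInstance.getString("exploratory_name"),
				compResource.getString("computational_name"));
	}
}
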
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SettingsDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SettingsDAO.java
index ff58d4a..d5770e1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SettingsDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SettingsDAO.java
@@ -241,8 +241,8 @@
 		return getSetting(AZURE_EDGE_INSTANCE_SIZE);
 	}
 
-	public String getAzureSsnInstanceSize() {
-		return getSetting(AZURE_SSN_INSTANCE_SIZE);
+	public String getSsnInstanceSize() {
+		return getSetting(SSN_INSTANCE_SIZE);
 	}
 
 	public String getAzureDataLakeNameTag() {
@@ -282,8 +282,8 @@
 		setSetting(AZURE_EDGE_INSTANCE_SIZE, azureEdgeInstanceSize);
 	}
 
-	public void setAzureSsnInstanceSize(String ssnInstanceSize) {
-		setSetting(AZURE_SSN_INSTANCE_SIZE, ssnInstanceSize);
+	public void setSsnInstanceSize(String ssnInstanceSize) {
+		setSetting(SSN_INSTANCE_SIZE, ssnInstanceSize);
 	}
 
 	public void setAzureDataLakeNameTag(String dataLakeNameTag) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java
index 0a4dde5..ae221f1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java
@@ -25,8 +25,6 @@
 
 	void updateUsers(String group, Set<String> users);
 
-	void removeUser(String group, String user);
-
 	void removeGroup(String groupId);
 
 	Set<String> getUserGroups(String user);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java
index 03a6f51..cc0da31 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java
@@ -44,11 +44,6 @@
 	}
 
 	@Override
-	public void removeUser(String group, String user) {
-		updateOne(USER_GROUPS, eq(ID, group), pull(USERS_FIELD, user));
-	}
-
-	@Override
 	public void removeGroup(String groupId) {
 		deleteOne(USER_GROUPS, eq(ID, groupId));
 	}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java
index 33d00d9..48abb54 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java
@@ -21,6 +21,7 @@
 
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
+import com.epam.dlab.cloud.CloudProvider;
 
 import java.util.List;
 import java.util.Set;
@@ -28,20 +29,20 @@
 public interface UserRoleDao {
 	List<UserRoleDto> findAll();
 
-	void removeAll();
-
 	void insert(UserRoleDto dto);
 
 	void insert(List<UserRoleDto> roles);
 
 	boolean update(UserRoleDto dto);
 
+	void updateMissingRoles(CloudProvider cloudProvider);
+
 	boolean addGroupToRole(Set<String> groups, Set<String> roleIds);
 
-	boolean removeGroupFromRole(Set<String> groups, Set<String> roleIds);
-
 	void removeGroupWhenRoleNotIn(String group, Set<String> roleIds);
 
+	void removeUnnecessaryRoles(CloudProvider cloudProviderToBeRemoved, List<CloudProvider> remainingProviders);
+
 	void remove(String roleId);
 
 	boolean removeGroup(String groupId);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
index f271723..5bc845a 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
@@ -20,28 +20,47 @@
 
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
+import com.epam.dlab.cloud.CloudProvider;
+import com.epam.dlab.exceptions.DlabException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.inject.Singleton;
 import com.mongodb.client.model.BsonField;
 import com.mongodb.client.result.UpdateResult;
 import org.bson.Document;
 import org.bson.conversions.Bson;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Date;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_GROUPS;
-import static com.mongodb.client.model.Aggregates.*;
-import static com.mongodb.client.model.Filters.*;
+import static com.mongodb.client.model.Aggregates.group;
+import static com.mongodb.client.model.Aggregates.lookup;
+import static com.mongodb.client.model.Aggregates.project;
+import static com.mongodb.client.model.Aggregates.unwind;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.not;
+import static java.lang.String.format;
 import static java.util.stream.Collectors.toList;
 
 @Singleton
 public class UserRoleDaoImpl extends BaseDAO implements UserRoleDao {
-
+	private static final ObjectMapper MAPPER = new ObjectMapper();
+	private static final String ROLES_FILE_FORMAT = "/mongo/%s/mongo_roles.json";
 	private static final String USERS_FIELD = "users";
 	private static final String GROUPS_FIELD = "groups";
 	private static final String DESCRIPTION = "description";
+	private static final String TYPE = "type";
+	private static final String CLOUD = "cloud";
 	private static final String ROLES = "roles";
 	private static final String GROUPS = "$groups";
 	private static final String GROUP = "group";
@@ -58,11 +77,6 @@
 	}
 
 	@Override
-	public void removeAll() {
-		mongoService.getCollection(MongoCollections.ROLES).drop();
-	}
-
-	@Override
 	public void insert(UserRoleDto dto) {
 		insertOne(MongoCollections.ROLES, dto, dto.getId());
 	}
@@ -81,19 +95,49 @@
 	}
 
 	@Override
+	public void updateMissingRoles(CloudProvider cloudProvider) {
+		getUserRoleFromFile(cloudProvider)
+				.stream()
+				.peek(u -> u.setGroups(Collections.emptySet()))
+				.filter(u -> findAll()
+						.stream()
+						.map(UserRoleDto::getId)
+						.noneMatch(id -> id.equals(u.getId())))
+				.forEach(this::insert);
+
+		addGroupToRole(aggregateRolesByGroup()
+						.stream()
+						.map(UserGroupDto::getGroup)
+						.collect(Collectors.toSet()),
+				getDefaultShapes(cloudProvider));
+	}
+
+	@Override
 	public boolean addGroupToRole(Set<String> groups, Set<String> roleIds) {
 		return conditionMatched(updateMany(MongoCollections.ROLES, in(ID, roleIds), addToSet(GROUPS_FIELD,
 				groups)));
 	}
 
 	@Override
-	public boolean removeGroupFromRole(Set<String> groups, Set<String> roleIds) {
-		return conditionMatched(updateMany(MongoCollections.ROLES, in(ID, roleIds), pullAll(GROUPS_FIELD, groups)));
+	public void removeGroupWhenRoleNotIn(String group, Set<String> roleIds) {
+		updateMany(MongoCollections.ROLES, not(in(ID, roleIds)), pull(GROUPS_FIELD, group));
 	}
 
 	@Override
-	public void removeGroupWhenRoleNotIn(String group, Set<String> roleIds) {
-		updateMany(MongoCollections.ROLES, not(in(ID, roleIds)), pull(GROUPS_FIELD, group));
+	public void removeUnnecessaryRoles(CloudProvider cloudProviderToBeRemoved, List<CloudProvider> remainingProviders) {
+		if (remainingProviders.contains(cloudProviderToBeRemoved)) {
+			return;
+		}
+
+		List<UserRoleDto> remainingRoles = new ArrayList<>();
+		remainingProviders.forEach(p -> remainingRoles.addAll(getUserRoleFromFile(p)));
+
+		getUserRoleFromFile(cloudProviderToBeRemoved).stream()
+				.map(UserRoleDto::getId)
+				.filter(u -> remainingRoles.stream()
+						.map(UserRoleDto::getId)
+						.noneMatch(id -> id.equals(u)))
+				.forEach(this::remove);
 	}
 
 	@Override
@@ -123,9 +167,35 @@
 				.collect(toList());
 	}
 
+	private List<UserRoleDto> getUserRoleFromFile(CloudProvider cloudProvider) {
+		try (InputStream is = getClass().getResourceAsStream(format(ROLES_FILE_FORMAT, cloudProvider.getName()))) {
+			return MAPPER.readValue(is, new TypeReference<List<UserRoleDto>>() {
+			});
+		} catch (IOException e) {
+			throw new IllegalStateException("Cannot unmarshal dlab roles due to: " + e.getMessage(), e);
+		}
+	}
+
+	private Set<String> getDefaultShapes(CloudProvider cloudProvider) {
+		if (cloudProvider == CloudProvider.AWS) {
+			return Stream.of("nbShapes_t2.medium_fetching", "compShapes_c4.xlarge_fetching")
+					.collect(Collectors.toSet());
+		} else if (cloudProvider == CloudProvider.GCP) {
+			return Stream.of("compShapes_n1-standard-2_fetching", "nbShapes_n1-standard-2_fetching")
+					.collect(Collectors.toSet());
+		} else if (cloudProvider == CloudProvider.AZURE) {
+			return Stream.of("nbShapes_Standard_E4s_v3_fetching", "compShapes_Standard_E4s_v3_fetching")
+					.collect(Collectors.toSet());
+		} else {
+			throw new DlabException("Unsupported cloud provider " + cloudProvider);
+		}
+	}
+
 	private Document roleDocument() {
 		return new Document().append(ID, "$" + ID)
 				.append(DESCRIPTION, "$" + DESCRIPTION)
+				.append(TYPE, "$" + TYPE)
+				.append(CLOUD, "$" + CLOUD)
 				.append(USERS_FIELD, "$" + USERS_FIELD)
 				.append(EXPLORATORY_SHAPES_FIELD, "$" + EXPLORATORY_SHAPES_FIELD)
 				.append(PAGES_FIELD, "$" + PAGES_FIELD)
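
The new updateMissingRoles above seeds any role present in the per-cloud mongo_roles.json classpath file but absent from Mongo, then re-attaches all known groups to the provider's default-shape roles. A minimal sketch of the id-diff step, using plain strings for role ids (names are illustrative; collecting the existing ids into a set once also avoids re-running findAll() for every candidate, which the stream above does inside its filter):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class MissingRolesSketch {
    public static void main(String[] args) {
        // Role ids shipped in mongo_roles.json vs. ids already stored in Mongo.
        List<String> fromFile = Arrays.asList("nbShapes_t2.medium_fetching", "admin", "user");
        Set<String> inDb = new HashSet<>(Arrays.asList("admin", "user"));

        // Keep only ids the database lacks and "insert" them.
        fromFile.stream()
                .filter(id -> !inDb.contains(id))
                .forEach(id -> System.out.println("insert " + id)); // insert nbShapes_t2.medium_fetching
    }
}
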
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsBillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsBillingDAO.java
deleted file mode 100644
index 5205cc4..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsBillingDAO.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.aws;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.aws.AwsBillingFilter;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-
-import java.util.Collections;
-import java.util.List;
-
-import static com.epam.dlab.model.aws.ReportLine.*;
-import static com.mongodb.client.model.Accumulators.*;
-import static com.mongodb.client.model.Aggregates.group;
-import static com.mongodb.client.model.Aggregates.sort;
-
-/**
- * DAO for user billing.
- */
-public class AwsBillingDAO extends BaseBillingDAO<AwsBillingFilter> {
-
-    public static final String DLAB_RESOURCE_TYPE = "dlab_resource_type";
-    public static final String USAGE_DATE_START = "from";
-    public static final String USAGE_DATE_END = "to";
-    public static final String TAG_RESOURCE_ID = "tag_resource_id";
-
-    @Override
-    protected Bson sortCriteria() {
-        return sort(new Document(ID + "." + USER, 1)
-                .append(ID + "." + FIELD_DLAB_ID, 1)
-                .append(ID + "." + DLAB_RESOURCE_TYPE, 1)
-                .append(ID + "." + FIELD_PRODUCT, 1));
-    }
-
-    @Override
-    protected Bson groupCriteria() {
-        return group(getGroupingFields(USER, FIELD_DLAB_ID, DLAB_RESOURCE_TYPE, FIELD_PRODUCT, FIELD_RESOURCE_TYPE,
-                FIELD_CURRENCY_CODE, FIELD_PROJECT),
-                sum(FIELD_COST, "$" + FIELD_COST),
-                min(MongoKeyWords.USAGE_FROM, "$" + FIELD_USAGE_DATE),
-                max(MongoKeyWords.USAGE_TO, "$" + FIELD_USAGE_DATE));
-    }
-
-    @Override
-    protected List<Bson> cloudMatchCriteria(AwsBillingFilter filter) {
-        return Collections.emptyList();
-    }
-
-
-    @Override
-    protected String getSsnShape() {
-        return "t2.medium";
-    }
-
-
-}
\ No newline at end of file
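
The deleted AWS billing DAO assembled its report from the Mongo driver's aggregation helpers: a group stage totalling cost and bounding the usage dates, followed by a sort on the compound _id. A self-contained sketch of that pipeline shape (connection string, collection, and field names are placeholders):

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import org.bson.Document;
import org.bson.conversions.Bson;

import java.util.Arrays;
import java.util.List;

import static com.mongodb.client.model.Accumulators.max;
import static com.mongodb.client.model.Accumulators.min;
import static com.mongodb.client.model.Accumulators.sum;
import static com.mongodb.client.model.Aggregates.group;
import static com.mongodb.client.model.Aggregates.sort;
import static com.mongodb.client.model.Sorts.ascending;

public class BillingAggregationSketch {
    public static void main(String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> billing = client.getDatabase("dlab").getCollection("billing");

            // Group rows by user and resource id, totalling cost and keeping the
            // usage-date bounds, then sort by the compound _id -- the same shape
            // as the deleted groupCriteria()/sortCriteria() pair.
            List<Bson> pipeline = Arrays.asList(
                    group(new Document("user", "$user").append("dlab_id", "$dlab_id"),
                            sum("cost", "$cost"),
                            min("from", "$usage_date"),
                            max("to", "$usage_date")),
                    sort(ascending("_id.user", "_id.dlab_id")));

            for (Document row : billing.aggregate(pipeline)) {
                System.out.println(row.toJson());
            }
        }
    }
}
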
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsKeyDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsKeyDao.java
deleted file mode 100644
index 745a2cd..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsKeyDao.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.aws;
-
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.aws.edge.EdgeInfoAws;
-import lombok.extern.slf4j.Slf4j;
-
-import java.util.Optional;
-
-@Slf4j
-public class AwsKeyDao extends KeyDAO {
-
-	public AwsKeyDao() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	@Override
-	public EdgeInfoAws getEdgeInfo(String user) {
-		return super.getEdgeInfo(user, EdgeInfoAws.class, new EdgeInfoAws());
-	}
-
-	@Override
-	public Optional<EdgeInfoAws> getEdgeInfoWhereStatusIn(String user, UserInstanceStatus... statuses) {
-		return super.getEdgeInfoWhereStatusIn(user, EdgeInfoAws.class, statuses);
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureBillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureBillingDAO.java
deleted file mode 100644
index 863f3eb..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureBillingDAO.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.azure;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.azure.AzureBillingFilter;
-import com.epam.dlab.billing.DlabResourceType;
-import com.google.inject.Singleton;
-import com.mongodb.client.model.Accumulators;
-import com.mongodb.client.model.Aggregates;
-import com.mongodb.client.model.Filters;
-import com.mongodb.client.model.Sorts;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-
-import java.util.Collections;
-import java.util.List;
-
-@Singleton
-@Slf4j
-public class AzureBillingDAO extends BaseBillingDAO<AzureBillingFilter> {
-	public static final String SIZE = "size";
-
-	@Override
-	protected List<Bson> cloudMatchCriteria(AzureBillingFilter filter) {
-		if (!filter.getCategory().isEmpty()) {
-			return Collections.singletonList(Filters.in(MongoKeyWords.METER_CATEGORY, filter.getCategory()));
-		} else {
-			return Collections.emptyList();
-		}
-	}
-
-	@Override
-	protected Bson groupCriteria() {
-		return Aggregates.group(getGroupingFields(
-				MongoKeyWords.DLAB_USER,
-				MongoKeyWords.DLAB_ID,
-				MongoKeyWords.RESOURCE_TYPE,
-				MongoKeyWords.METER_CATEGORY,
-				MongoKeyWords.CURRENCY_CODE,
-				FIELD_PROJECT),
-				Accumulators.sum(MongoKeyWords.COST, MongoKeyWords.prepend$(MongoKeyWords.COST)),
-				Accumulators.min(MongoKeyWords.USAGE_FROM, MongoKeyWords.prepend$(MongoKeyWords.USAGE_DAY)),
-				Accumulators.max(MongoKeyWords.USAGE_TO, MongoKeyWords.prepend$(MongoKeyWords.USAGE_DAY))
-		);
-	}
-
-	@Override
-	protected Bson sortCriteria() {
-		return Aggregates.sort(Sorts.ascending(
-				MongoKeyWords.prependId(MongoKeyWords.DLAB_USER),
-				MongoKeyWords.prependId(MongoKeyWords.DLAB_ID),
-				MongoKeyWords.prependId(MongoKeyWords.RESOURCE_TYPE),
-				MongoKeyWords.prependId(MongoKeyWords.METER_CATEGORY)));
-	}
-
-	@Override
-	protected String getServiceBaseName() {
-		return settings.getServiceBaseName().replace("_", "-").toLowerCase();
-	}
-
-	@Override
-	protected String getSsnShape() {
-		return settings.getAzureSsnInstanceSize();
-	}
-
-	@Override
-	protected String shapeFieldName() {
-		return SIZE;
-	}
-
-	@Override
-	protected String dlabIdFieldName() {
-		return MongoKeyWords.DLAB_ID;
-	}
-
-	@Override
-	protected String productFieldName() {
-		return MongoKeyWords.METER_CATEGORY;
-	}
-
-	@Override
-	protected String costFieldName() {
-		return MongoKeyWords.COST_STRING;
-	}
-
-	@Override
-	protected String usageDateFromFieldName() {
-		return MongoKeyWords.USAGE_FROM;
-	}
-
-	@Override
-	protected String usageDateToFieldName() {
-		return MongoKeyWords.USAGE_TO;
-	}
-
-	@Override
-	protected String currencyCodeFieldName() {
-		return MongoKeyWords.CURRENCY_CODE;
-	}
-
-	@Override
-	protected String resourceType(Document id) {
-		return DlabResourceType.getResourceTypeName(id.getString(MongoKeyWords.RESOURCE_TYPE));
-	}
-
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureKeyDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureKeyDao.java
deleted file mode 100644
index 4a7ec1b..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureKeyDao.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.azure;
-
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.azure.edge.EdgeInfoAzure;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-
-import java.util.Optional;
-
-@Slf4j
-@Singleton
-public class AzureKeyDao extends KeyDAO {
-
-	public AzureKeyDao() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	@Override
-	public EdgeInfoAzure getEdgeInfo(String user) {
-		return super.getEdgeInfo(user, EdgeInfoAzure.class, new EdgeInfoAzure());
-	}
-
-	@Override
-	public Optional<EdgeInfoAzure> getEdgeInfoWhereStatusIn(String user, UserInstanceStatus... statuses) {
-		return super.getEdgeInfoWhereStatusIn(user, EdgeInfoAzure.class, statuses);
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpBillingDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpBillingDao.java
deleted file mode 100644
index b9d1488..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpBillingDao.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.gcp;
-
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.gcp.GcpBillingFilter;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-
-import java.util.Collections;
-import java.util.List;
-
-import static com.epam.dlab.MongoKeyWords.USAGE_FROM;
-import static com.epam.dlab.MongoKeyWords.USAGE_TO;
-import static com.epam.dlab.backendapi.dao.aws.AwsBillingDAO.DLAB_RESOURCE_TYPE;
-import static com.epam.dlab.model.aws.ReportLine.*;
-import static com.mongodb.client.model.Accumulators.*;
-import static com.mongodb.client.model.Aggregates.group;
-import static com.mongodb.client.model.Aggregates.sort;
-
-public class GcpBillingDao extends BaseBillingDAO<GcpBillingFilter> {
-    @Override
-    protected Bson sortCriteria() {
-        return sort(new Document(ID + "." + USER, 1)
-                .append(ID + "." + FIELD_DLAB_ID, 1)
-                .append(ID + "." + FIELD_PRODUCT, 1));
-    }
-
-    @Override
-    protected Bson groupCriteria() {
-        return group(getGroupingFields(USER, FIELD_DLAB_ID, DLAB_RESOURCE_TYPE, FIELD_PRODUCT,
-                currencyCodeFieldName(), FIELD_PROJECT),
-                sum(FIELD_COST, "$" + FIELD_COST),
-                min(USAGE_FROM, "$" + FIELD_USAGE_DATE),
-                max(USAGE_TO, "$" + FIELD_USAGE_DATE)
-        );
-    }
-
-    @Override
-    protected List<Bson> cloudMatchCriteria(GcpBillingFilter filter) {
-        return Collections.emptyList();
-    }
-
-
-    @Override
-    protected String getSsnShape() {
-        return "t2.medium";
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpKeyDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpKeyDao.java
deleted file mode 100644
index 7057f8d..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpKeyDao.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.gcp;
-
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.gcp.edge.EdgeInfoGcp;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-
-import java.util.Optional;
-
-
-@Slf4j
-@Singleton
-public class GcpKeyDao extends KeyDAO {
-	public GcpKeyDao() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	@Override
-	public EdgeInfoGcp getEdgeInfo(String user) {
-		return super.getEdgeInfo(user, EdgeInfoGcp.class, new EdgeInfoGcp());
-	}
-
-	@Override
-	public Optional<EdgeInfoGcp> getEdgeInfoWhereStatusIn(String user, UserInstanceStatus... statuses) {
-		return super.getEdgeInfoWhereStatusIn(user, EdgeInfoGcp.class, statuses);
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BaseShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BaseShape.java
deleted file mode 100644
index e9cbd67..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BaseShape.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.backendapi.service.ShapeFormat;
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.AllArgsConstructor;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-import java.util.Map;
-
-@Data
-@AllArgsConstructor
-@NoArgsConstructor
-public class BaseShape implements ShapeFormat {
-    protected String shape;
-    protected UserInstanceStatus status;
-    protected Map<String, String> tags;
-
-    @Override
-    public String format() {
-        return shape;
-    }
-}
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/Project.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BillingReport.java
similarity index 62%
copy from services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/Project.java
copy to services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BillingReport.java
index 0b40235..2bb2062 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/Project.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BillingReport.java
@@ -17,25 +17,29 @@
  * under the License.
  */
 
-package com.epam.dlab.billing.gcp.documents;
+package com.epam.dlab.backendapi.domain;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Builder;
 import lombok.Data;
-import org.springframework.data.mongodb.core.mapping.Document;
 
+import java.time.LocalDate;
 import java.util.List;
 
-@Document(collection = "Projects")
 @Data
-public class Project {
-
-	@JsonProperty("name")
-	private String name;
-	private List<Endpoint> endpoints;
-
-
-	@Data
-	public class Endpoint {
-		private final String name;
-	}
+@Builder
+public class BillingReport {
+    private String sbn;
+    private String name;
+    @JsonProperty("report_lines")
+    private List<BillingReportLine> reportLines;
+    @JsonProperty("from")
+    private LocalDate usageDateFrom;
+    @JsonProperty("to")
+    private LocalDate usageDateTo;
+    @JsonProperty("total_cost")
+    private double totalCost;
+    private String currency;
+    @JsonProperty("is_full")
+    private boolean isFull;
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/BillingData.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BillingReportLine.java
similarity index 61%
rename from services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/BillingData.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BillingReportLine.java
index 32a98ed..a9cdd12 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/BillingData.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BillingReportLine.java
@@ -17,54 +17,40 @@
  * under the License.
  */
 
-package com.epam.dlab.billing.gcp.model;
+package com.epam.dlab.backendapi.domain;
 
+import com.epam.dlab.dto.UserInstanceStatus;
+import com.epam.dlab.dto.billing.BillingResourceType;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Builder;
 import lombok.Data;
-import org.springframework.data.annotation.Id;
-import org.springframework.data.mongodb.core.mapping.Document;
-import org.springframework.data.mongodb.core.mapping.Field;
 
 import java.time.LocalDate;
 
 @Data
 @Builder
-@Document(collection = "billing")
-public class BillingData {
-    @Id
-    private String id;
-    private String user;
-    @Field("resource_name")
-    private String displayName;
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class BillingReportLine {
+    private String dlabId;
+    private String application;
+    @JsonProperty("resource_name")
     private String resourceName;
-    @Field("from")
+    private String project;
+    private String endpoint;
+    private String user;
+    @JsonProperty("from")
     private LocalDate usageDateFrom;
-    @Field("to")
+    @JsonProperty("to")
     private LocalDate usageDateTo;
-    @Field("usage_date")
     private String usageDate;
     private String product;
     private String usageType;
     private Double cost;
-    @Field("currency_code")
     private String currency;
-    private String project;
+    @JsonProperty("resource_type")
+    private BillingResourceType resourceType;
+    private UserInstanceStatus status;
+    private String shape;
     private String exploratoryName;
-    private String computationalName;
-    @Field("dlab_id")
-    private String dlabId;
-    @Field("dlab_resource_type")
-    private ResourceType resourceType;
-
-
-    public enum ResourceType {
-        EDGE,
-        SSN,
-        SHARED_BUCKET,
-        SSN_BUCKET,
-        EDGE_BUCKET,
-        VOLUME,
-        EXPLORATORY,
-        COMPUTATIONAL
-    }
 }
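
BillingReportLine combines Lombok's @Data/@Builder with Jackson renames such as @JsonProperty("from"). A cut-down sketch of that combination (field set reduced for brevity; assumes Lombok and the jackson-datatype-jsr310 module are on the classpath):

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.Builder;
import lombok.Data;

import java.time.LocalDate;

public class ReportLineDemo {
    @Data
    @Builder
    static class Line {
        private String dlabId;
        @JsonProperty("from")
        private LocalDate usageDateFrom;
        private Double cost;
    }

    public static void main(String[] args) throws Exception {
        Line line = Line.builder()
                .dlabId("project:notebook")
                .usageDateFrom(LocalDate.of(2020, 1, 1))
                .cost(1.25)
                .build();
        // findAndRegisterModules() picks up jackson-datatype-jsr310 for LocalDate,
        // if it is on the classpath.
        ObjectMapper mapper = new ObjectMapper().findAndRegisterModules();
        System.out.println(mapper.writeValueAsString(line)); // "from" replaces "usageDateFrom"
    }
}
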
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
index cf4aba1..44f8eef 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
@@ -19,6 +19,7 @@
 
 package com.epam.dlab.backendapi.domain;
 
+import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Data;
 
 import javax.validation.constraints.NotNull;
@@ -33,8 +34,10 @@
 	private final Set<String> groups;
 	@NotNull final Set<String> endpoints;
 	@NotNull
-	@Pattern(regexp = "^ssh-.*\\n?", message = "Wrong key format. Key should be in openSSH format")
+	@Pattern(regexp = "^ssh-.*\\n?", message = "format is incorrect. Please use the openSSH format")
 	private final String key;
 	@NotNull
 	private final String tag;
+	@JsonProperty("shared_image_enabled")
+	private boolean sharedImageEnabled;
 }
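
The key constraint above accepts any single openSSH public-key line: the value must start with "ssh-" and may carry one trailing newline. A quick check of how Bean Validation's matches()-style anchoring treats that pattern (sample keys are truncated fakes):

import java.util.regex.Pattern;

public class KeyFormatCheck {
    public static void main(String[] args) {
        Pattern openSsh = Pattern.compile("^ssh-.*\\n?");
        // matches() anchors at both ends, as Bean Validation's @Pattern does.
        System.out.println(openSsh.matcher("ssh-rsa AAAAB3Nza... user@host").matches());        // true
        System.out.println(openSsh.matcher("ssh-ed25519 AAAAC3Nz... user@host\n").matches());   // true
        System.out.println(openSsh.matcher("-----BEGIN RSA PRIVATE KEY-----").matches());       // false
    }
}
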
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineServiceShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineServiceShape.java
deleted file mode 100644
index 0df9b9e..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineServiceShape.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.backendapi.service.ShapeFormat;
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Map;
-
-
-@Slf4j
-public class DataEngineServiceShape extends BaseShape implements ShapeFormat {
-    private static final String DES_NAME_FORMAT = "Master: %s%sSlave:  %d x %s";
-    private String slaveCount;
-    private String slaveShape;
-
-    @Builder
-    public DataEngineServiceShape(String shape, UserInstanceStatus status, String slaveCount, String slaveShape,
-                                  Map<String, String> tags) {
-        super(shape, status, tags);
-        this.slaveCount = slaveCount;
-        this.slaveShape = slaveShape;
-    }
-
-    @Override
-    public String format() {
-        Integer count;
-        try {
-            count = Integer.valueOf(slaveCount);
-        } catch (NumberFormatException e) {
-            log.error("Cannot parse string {} to integer", slaveCount);
-            return StringUtils.EMPTY;
-        }
-        return String.format(DES_NAME_FORMAT, shape, System.lineSeparator(), count - 1, slaveShape);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineShape.java
deleted file mode 100644
index 7575268..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineShape.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.backendapi.service.ShapeFormat;
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Map;
-
-@Slf4j
-public class DataEngineShape extends BaseShape implements ShapeFormat {
-    private static final String DE_NAME_FORMAT = "%d x %s";
-    private String slaveCount;
-
-
-    @Builder
-    public DataEngineShape(String shape, UserInstanceStatus status, String slaveCount, Map<String, String> tags) {
-        super(shape, status, tags);
-        this.slaveCount = slaveCount;
-    }
-
-    @Override
-    public String format() {
-        Integer count;
-        try {
-            count = Integer.valueOf(slaveCount);
-        } catch (NumberFormatException e) {
-            log.error("Cannot parse string {} to integer", slaveCount);
-            return StringUtils.EMPTY;
-        }
-        return String.format(DE_NAME_FORMAT, count, shape);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
index cefe896..f288a68 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
@@ -19,20 +19,33 @@
 
 package com.epam.dlab.backendapi.domain;
 
+import com.epam.dlab.cloud.CloudProvider;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Data;
+import org.hibernate.validator.constraints.NotBlank;
+import org.hibernate.validator.constraints.NotEmpty;
+import org.hibernate.validator.constraints.URL;
 
-import javax.annotation.RegEx;
 
 @Data
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class EndpointDTO {
 
+	private static final String URL_REGEXP_VALIDATION = "^(http(s)?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]";
+	@NotBlank(message = "field cannot be empty")
 	private final String name;
-
+	@URL(regexp = URL_REGEXP_VALIDATION, message = "field is in improper format!")
 	private final String url;
+	@NotBlank(message = "field cannot be empty")
 	private final String account;
 	@JsonProperty("endpoint_tag")
 	private final String tag;
+	private final EndpointStatus status;
+	private final CloudProvider cloudProvider;
+
+	public enum EndpointStatus {
+		ACTIVE,
+		INACTIVE
+	}
 }
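
Endpoint URLs are now validated twice: Hibernate Validator's @URL plus the explicit URL_REGEXP_VALIDATION above, which pins the scheme to http/https and requires at least one address character after the scheme. A standalone look at what that regexp accepts (test URLs are invented):

import java.util.regex.Pattern;

public class EndpointUrlCheck {
    private static final Pattern URL_PATTERN = Pattern.compile(
            "^(http(s)?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]");

    public static void main(String[] args) {
        System.out.println(URL_PATTERN.matcher("https://endpoint.example.com:8084/api").matches()); // true
        System.out.println(URL_PATTERN.matcher("ftp://endpoint.example.com").matches());            // false: scheme
        System.out.println(URL_PATTERN.matcher("https://").matches());                              // false: empty host
    }
}
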
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectManagingDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointResourcesDTO.java
similarity index 77%
rename from services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectManagingDTO.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointResourcesDTO.java
index 0eb0f69..85f4418 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectManagingDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointResourcesDTO.java
@@ -19,17 +19,15 @@
 
 package com.epam.dlab.backendapi.domain;
 
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.epam.dlab.dto.UserInstanceDTO;
 import lombok.AllArgsConstructor;
 import lombok.Data;
 
+import java.util.List;
 
 @Data
-@JsonIgnoreProperties(ignoreUnknown = true)
 @AllArgsConstructor
-public class ProjectManagingDTO {
-    private String name;
-    private final Integer budget;
-    private boolean canBeStopped;
-    private boolean canBeTerminated;
-}
\ No newline at end of file
+public class EndpointResourcesDTO {
+    private List<UserInstanceDTO> exploratories;
+    private List<ProjectDTO> projects;
+}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointShape.java
deleted file mode 100644
index 0b0af9d..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointShape.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-
-import java.util.Collections;
-
-public class EndpointShape extends BaseShape {
-
-    @Builder
-    public EndpointShape(String shape, UserInstanceStatus status) {
-        super(shape, status, Collections.emptyMap());
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EnvStatusListener.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EnvStatusListener.java
deleted file mode 100644
index ab1f9a8..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EnvStatusListener.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.EnvDAO;
-import com.epam.dlab.backendapi.service.EndpointService;
-import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.constants.ServiceConsts;
-import com.epam.dlab.dto.UserEnvironmentResources;
-import com.epam.dlab.dto.status.EnvResourceList;
-import com.epam.dlab.rest.client.RESTService;
-import com.epam.dlab.rest.contracts.InfrasctructureAPI;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalNotification;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.google.inject.name.Named;
-import io.dropwizard.lifecycle.Managed;
-import lombok.extern.slf4j.Slf4j;
-
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Send requests to the docker to check environment status.
- */
-@Singleton
-@Slf4j
-public class EnvStatusListener implements Managed {
-
-	private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
-
-	private final Cache<String, UserInfo> sessions;
-	private final EnvDAO dao;
-	private final RESTService provisioningService;
-	private final StatusChecker statusChecker = new StatusChecker();
-	private final long checkEnvStatusTimeout;
-	private final RequestBuilder requestBuilder;
-
-	@Inject
-	private RequestId requestId;
-	@Inject
-	private EndpointService endpointService;
-
-	@Inject
-	public EnvStatusListener(SelfServiceApplicationConfiguration configuration, EnvDAO dao,
-							 @Named(ServiceConsts.PROVISIONING_SERVICE_NAME) RESTService provisioningService,
-							 RequestBuilder requestBuilder) {
-
-		this.sessions = CacheBuilder.newBuilder()
-				.expireAfterAccess(configuration.getInactiveUserTimeoutMillSec(), TimeUnit.MILLISECONDS)
-				.removalListener((RemovalNotification<String, Object> notification) ->
-						log.info("User {} session is removed", notification.getKey()))
-				.build();
-
-		this.dao = dao;
-		this.provisioningService = provisioningService;
-		this.checkEnvStatusTimeout = configuration.getCheckEnvStatusTimeout().toMilliseconds();
-		this.requestBuilder = requestBuilder;
-	}
-
-	@Override
-	public void start() {
-		executorService.scheduleAtFixedRate(new StatusChecker(), checkEnvStatusTimeout, checkEnvStatusTimeout,
-				TimeUnit.MILLISECONDS);
-	}
-
-	@Override
-	public void stop() throws Exception {
-		statusChecker.shouldStop = true;
-		if (!executorService.awaitTermination(10, TimeUnit.SECONDS)) {
-			executorService.shutdownNow();
-		}
-	}
-
-	public void registerSession(UserInfo userInfo) {
-		UserInfo ui = getSession(userInfo.getName());
-		log.info("Register session(existing = {}) for {}", ui != null, userInfo.getName());
-		sessions.put(userInfo.getName(), userInfo);
-	}
-
-	public void unregisterSession(UserInfo userInfo) {
-		log.info("Invalidate session for {}", userInfo.getName());
-		sessions.invalidate(userInfo.getName());
-	}
-
-	public UserInfo getSession(String username) {
-		return sessions.getIfPresent(username);
-	}
-
-	/**
-	 * Scheduled @{@link Runnable} that verifies status of users' resources
-	 */
-	private class StatusChecker implements Runnable {
-		private volatile boolean shouldStop = false;
-
-		@Override
-		public void run() {
-
-			log.debug("Start checking environment statuses");
-
-			sessions.cleanUp();
-
-			for (Map.Entry<String, UserInfo> entry : sessions.asMap().entrySet()) {
-				try {
-					if (!shouldStop) {
-						checkStatusThroughProvisioningService(entry.getValue());
-					} else {
-						log.info("Stopping env status listener");
-					}
-				} catch (RuntimeException e) {
-					log.error("Cannot check env status for user {}", entry.getKey(), e);
-				}
-			}
-		}
-
-		/**
-		 * Sends request to docker to check the status of user environment.
-		 *
-		 * @param userInfo username
-		 */
-		private void checkStatusThroughProvisioningService(UserInfo userInfo) {
-
-			final Map<String, EnvResourceList> envResources = dao.findEnvResources(userInfo.getName());
-			UserEnvironmentResources dto = requestBuilder.newUserEnvironmentStatus(userInfo);
-
-			envResources.forEach((endpoint, resourceList) -> {
-				log.trace("EnvStatus listener check status for user {} with resource list {}", userInfo.getName(),
-						resourceList);
-				dto.withResourceList(resourceList);
-				log.trace("Ask docker for the status of resources for user {}: {}", userInfo.getName(), dto);
-				String uuid =
-						provisioningService.post(endpointService.get(endpoint).getUrl() + InfrasctructureAPI.INFRASTRUCTURE_STATUS,
-								userInfo.getAccessToken(),
-								dto, String.class);
-				requestId.put(userInfo.getName(), uuid);
-
-			});
-		}
-	}
-}
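
The removed listener tracked user sessions in a Guava cache that evicts entries after a period of inactivity and logs each eviction. That cache construction in isolation (the timeout is arbitrary here; the real value came from configuration.getInactiveUserTimeoutMillSec()):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.TimeUnit;

public class SessionCacheSketch {
    public static void main(String[] args) {
        Cache<String, String> sessions = CacheBuilder.newBuilder()
                .expireAfterAccess(30, TimeUnit.MINUTES)
                .removalListener(notification ->
                        System.out.println("User " + notification.getKey() + " session is removed"))
                .build();

        sessions.put("alice", "token-1");
        System.out.println(sessions.getIfPresent("alice")); // token-1
        sessions.invalidate("alice");                        // fires the removal listener
        System.out.println(sessions.getIfPresent("alice")); // null
    }
}
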
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryLibCache.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryLibCache.java
index 13c6cda..0faf66c 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryLibCache.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryLibCache.java
@@ -223,18 +223,19 @@
 			String uuid;
 			if (userInstance.getResources() != null && !userInstance.getResources().isEmpty()) {
 				UserComputationalResource userComputationalResource = userInstance.getResources().get(0);
+				EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 				LibListComputationalDTO dto = requestBuilder.newLibComputationalList(userInfo, userInstance,
-						userComputationalResource);
-				uuid = provisioningService.post(endpointService
-								.get(userInstance.getEndpoint()).getUrl() + ComputationalAPI.COMPUTATIONAL_LIB_LIST,
+						userComputationalResource, endpointDTO);
+
+				uuid = provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_LIB_LIST,
 						userInfo.getAccessToken(),
 						dto, String.class);
 			} else {
-				ExploratoryActionDTO<?> dto = requestBuilder.newLibExploratoryList(userInfo, userInstance);
-				uuid =
-						provisioningService.post(endpointService.get(userInstance.getEndpoint()).getUrl() + ExploratoryAPI.EXPLORATORY_LIB_LIST,
-								userInfo.getAccessToken(), dto,
-								String.class);
+				EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
+				ExploratoryActionDTO<?> dto = requestBuilder.newLibExploratoryList(userInfo, userInstance, endpointDTO);
+				uuid = provisioningService.post(endpointDTO.getUrl() + ExploratoryAPI.EXPLORATORY_LIB_LIST,
+						userInfo.getAccessToken(), dto,
+						String.class);
 			}
 
 			requestId.put(userInfo.getName(), uuid);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryShape.java
deleted file mode 100644
index 169fda1..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryShape.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-
-import java.util.Map;
-
-public class ExploratoryShape extends BaseShape {
-
-    @Builder
-    public ExploratoryShape(String shape, UserInstanceStatus status, Map<String, String> tags) {
-        super(shape, status, tags);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
index 90965cd..72d6697 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
@@ -21,6 +21,8 @@
 
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
 import lombok.Data;
 
 import javax.validation.constraints.NotNull;
@@ -29,6 +31,8 @@
 import java.util.Set;
 
 @Data
+@Builder
+@AllArgsConstructor
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class ProjectDTO {
 	@NotNull
@@ -36,13 +40,13 @@
 	@NotNull
 	private final Set<String> groups;
 	@NotNull
-	@Pattern(regexp = "^ssh-.*\\n", message = "Wrong key format. Key should be in openSSH format")
+	@Pattern(regexp = "^ssh-.*\\n", message = "format is incorrect. Please use the openSSH format")
 	private final String key;
 	@NotNull
 	private final String tag;
 	private final Integer budget;
 	private final List<ProjectEndpointDTO> endpoints;
-	private boolean useSharedImage;
+	private final boolean sharedImageEnabled;
 
 
 	public enum Status {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/SsnShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/SsnShape.java
deleted file mode 100644
index b4aba61..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/SsnShape.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-
-import java.util.Collections;
-
-public class SsnShape extends BaseShape {
-
-    @Builder
-    public SsnShape(String shape, UserInstanceStatus status) {
-        super(shape, status, Collections.emptyMap());
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/UpdateProjectDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/UpdateProjectDTO.java
index 20728c1..4622ac5 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/UpdateProjectDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/UpdateProjectDTO.java
@@ -20,6 +20,7 @@
 package com.epam.dlab.backendapi.domain;
 
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Data;
 
 import javax.validation.constraints.NotNull;
@@ -34,5 +35,6 @@
 	private final Set<String> endpoints;
 	@NotNull
 	private final Set<String> groups;
-
+	@JsonProperty("shared_image_enabled")
+	private final boolean sharedImageEnabled;
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/MongoStartupListener.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/MongoStartupListener.java
index 7424777..9d9c9f7 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/MongoStartupListener.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/MongoStartupListener.java
@@ -20,6 +20,7 @@
 package com.epam.dlab.backendapi.dropwizard.listeners;
 
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
+import com.epam.dlab.backendapi.dao.EndpointDAO;
 import com.epam.dlab.backendapi.dao.SettingsDAO;
 import com.epam.dlab.backendapi.dao.UserRoleDao;
 import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
@@ -33,9 +34,16 @@
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
 
 import static java.lang.String.format;
+import static java.util.Comparator.comparing;
+import static java.util.stream.Collectors.collectingAndThen;
+import static java.util.stream.Collectors.toCollection;
 
 
 @Slf4j
@@ -46,22 +54,22 @@
 	private final UserRoleDao userRoleDao;
 	private final SelfServiceApplicationConfiguration configuration;
 	private final SettingsDAO settingsDAO;
+	private final EndpointDAO endpointDAO;
 
 	@Inject
-	public MongoStartupListener(UserRoleDao userRoleDao,
-								SelfServiceApplicationConfiguration configuration, SettingsDAO settingsDAO) {
+	public MongoStartupListener(UserRoleDao userRoleDao, SelfServiceApplicationConfiguration configuration,
+								SettingsDAO settingsDAO, EndpointDAO endpointDAO) {
 		this.userRoleDao = userRoleDao;
 		this.configuration = configuration;
 		this.settingsDAO = settingsDAO;
+		this.endpointDAO = endpointDAO;
 	}
 
 	@Override
 	public void serverStarted(Server server) {
 		settingsDAO.setServiceBaseName(configuration.getServiceBaseName());
 		settingsDAO.setConfOsFamily(configuration.getOs());
-		if (configuration.getCloudProvider() == CloudProvider.AZURE) {
-			settingsDAO.setAzureSsnInstanceSize(configuration.getSsnInstanceSize());
-		}
+		settingsDAO.setSsnInstanceSize(configuration.getSsnInstanceSize());
 		if (userRoleDao.findAll().isEmpty()) {
 			log.debug("Populating DLab roles into database");
 			userRoleDao.insert(getRoles());
@@ -71,8 +79,14 @@
 	}
 
 	private List<UserRoleDto> getRoles() {
-		try (InputStream is = getClass().getResourceAsStream(format(ROLES_FILE_FORMAT,
-				configuration.getCloudProvider().getName()))) {
+		Set<UserRoleDto> userRoles = new HashSet<>();
+		endpointDAO.getEndpoints().forEach(e -> userRoles.addAll(getUserRoleFromFile(e.getCloudProvider())));
+		return userRoles.stream().collect(collectingAndThen(toCollection(() -> new TreeSet<>(comparing(UserRoleDto::getId))),
+				ArrayList::new));
+	}
+
+	private List<UserRoleDto> getUserRoleFromFile(CloudProvider cloudProvider) {
+		try (InputStream is = getClass().getResourceAsStream(format(ROLES_FILE_FORMAT, cloudProvider.getName()))) {
 			return MAPPER.readValue(is, new TypeReference<List<UserRoleDto>>() {
 			});
 		} catch (IOException e) {
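
getRoles() now merges the role files of every registered endpoint, then de-duplicates by role id by funnelling the stream into a TreeSet keyed on the id before converting back to a list. The collector trick in isolation, with a throwaway Role class standing in for UserRoleDto:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeSet;

import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.collectingAndThen;
import static java.util.stream.Collectors.toCollection;

public class DedupeByIdSketch {
    static class Role {
        private final String id;
        Role(String id) { this.id = id; }
        String getId() { return id; }
        @Override public String toString() { return id; }
    }

    public static void main(String[] args) {
        List<Role> merged = Arrays.asList(new Role("admin"), new Role("user"), new Role("admin"));
        // The TreeSet's comparator treats equal ids as duplicates and drops them;
        // collectingAndThen converts the set back to a List in one pass.
        List<Role> unique = merged.stream()
                .collect(collectingAndThen(toCollection(() -> new TreeSet<>(comparing(Role::getId))),
                        ArrayList::new));
        System.out.println(unique); // [admin, user]
    }
}
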
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/RestoreHandlerStartupListener.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/RestoreHandlerStartupListener.java
index 73f5961..0de166c 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/RestoreHandlerStartupListener.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dropwizard/listeners/RestoreHandlerStartupListener.java
@@ -19,24 +19,30 @@
 
 package com.epam.dlab.backendapi.dropwizard.listeners;
 
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.rest.client.RESTService;
 import io.dropwizard.lifecycle.ServerLifecycleListener;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
 import org.eclipse.jetty.server.Server;
 
 @Slf4j
 public class RestoreHandlerStartupListener implements ServerLifecycleListener {
 
 	private final RESTService provisioningService;
+	private final EndpointService endpointService;
 
-	public RestoreHandlerStartupListener(RESTService provisioningService) {
+	public RestoreHandlerStartupListener(RESTService provisioningService, EndpointService endpointService) {
 		this.provisioningService = provisioningService;
+		this.endpointService = endpointService;
 	}
 
 	@Override
 	public void serverStarted(Server server) {
 		try {
-			provisioningService.post("/handler/restore", new Object(), Object.class);
+			endpointService.getEndpointsWithStatus(EndpointDTO.EndpointStatus.ACTIVE)
+					.forEach(e -> provisioningService.post(e.getUrl() + "/handler/restore", StringUtils.EMPTY, Object.class));
 		} catch (Exception e) {
 			log.error("Exception occurred during restore handler request: {}", e.getMessage());
 		}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/interceptor/ProjectAdminInterceptor.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/interceptor/ProjectAdminInterceptor.java
new file mode 100644
index 0000000..a536dab
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/interceptor/ProjectAdminInterceptor.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.interceptor;
+
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.annotation.Project;
+import com.epam.dlab.backendapi.annotation.User;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.exceptions.DlabException;
+import com.epam.dlab.exceptions.ResourceQuoteReachedException;
+import com.google.inject.Inject;
+import lombok.extern.slf4j.Slf4j;
+import org.aopalliance.intercept.MethodInterceptor;
+import org.aopalliance.intercept.MethodInvocation;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Parameter;
+import java.util.Objects;
+import java.util.stream.IntStream;
+
+@Slf4j
+public class ProjectAdminInterceptor implements MethodInterceptor {
+    @Inject
+    private ProjectService projectService;
+
+    @Override
+    public Object invoke(MethodInvocation mi) throws Throwable {
+        if (grantAccess(mi)) {
+            return mi.proceed();
+        } else {
+            final Method method = mi.getMethod();
+            log.warn("Execution of method {} failed because the user doesn't have the appropriate permission", method.getName());
+            throw new ResourceQuoteReachedException("Operation cannot be finished. The user doesn't have the appropriate permission");
+        }
+    }
+
+    private boolean grantAccess(MethodInvocation mi) {
+        final Parameter[] parameters = mi.getMethod().getParameters();
+        String project = IntStream.range(0, parameters.length)
+                .filter(i -> Objects.nonNull(parameters[i].getAnnotation(Project.class)))
+                .mapToObj(i -> (String) mi.getArguments()[i])
+                .findAny()
+                .orElseThrow(() -> new DlabException("Project parameter is required"));
+        UserInfo userInfo = IntStream.range(0, parameters.length)
+                .filter(i -> Objects.nonNull(parameters[i].getAnnotation(User.class)))
+                .mapToObj(i -> (UserInfo) mi.getArguments()[i])
+                .findAny()
+                .orElseThrow(() -> new DlabException("UserInfo parameter is required"));
+
+        return checkPermission(userInfo, project);
+    }
+
+    private boolean checkPermission(UserInfo userInfo, String project) {
+        return UserRoles.isAdmin(userInfo) || UserRoles.isProjectAdmin(userInfo, projectService.get(project).getGroups());
+    }
+}
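
ProjectAdminInterceptor is an AOP Alliance MethodInterceptor, so it is presumably wired through Guice the same way the module below binds BudgetLimitInterceptor to @BudgetLimited. A minimal sketch of such a binding; the @ProjectAdmin marker annotation is an assumption, since the real annotation does not appear in this diff:

import com.google.inject.AbstractModule;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import static com.google.inject.matcher.Matchers.annotatedWith;
import static com.google.inject.matcher.Matchers.any;

public class ProjectAdminBindingSketch extends AbstractModule {

    // Assumed marker annotation; the real annotation's name is not in this diff.
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    public @interface ProjectAdmin {
    }

    @Override
    protected void configure() {
        ProjectAdminInterceptor interceptor = new ProjectAdminInterceptor();
        // Populate the interceptor's @Inject fields (ProjectService) before use.
        requestInjection(interceptor);
        // Apply it to any method carrying the marker, mirroring how the module
        // below binds BudgetLimitInterceptor to @BudgetLimited.
        bindInterceptor(any(), annotatedWith(ProjectAdmin.class), interceptor);
    }
}
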
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AwsSelfServiceModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AwsSelfServiceModule.java
deleted file mode 100644
index f4db277..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AwsSelfServiceModule.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.modules;
-
-import com.epam.dlab.backendapi.SelfServiceApplication;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.annotation.BudgetLimited;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.dao.aws.AwsKeyDao;
-import com.epam.dlab.backendapi.interceptor.BudgetLimitInterceptor;
-import com.epam.dlab.backendapi.resources.aws.BillingResourceAws;
-import com.epam.dlab.backendapi.resources.aws.ComputationalResourceAws;
-import com.epam.dlab.backendapi.resources.callback.aws.EdgeCallbackAws;
-import com.epam.dlab.backendapi.resources.callback.aws.KeyUploaderCallbackAws;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.service.InfrastructureInfoService;
-import com.epam.dlab.backendapi.service.InfrastructureTemplateService;
-import com.epam.dlab.backendapi.service.aws.AwsBillingService;
-import com.epam.dlab.backendapi.service.aws.AwsInfrastructureInfoService;
-import com.epam.dlab.backendapi.service.aws.AwsInfrastructureTemplateService;
-import com.epam.dlab.cloud.CloudModule;
-import com.epam.dlab.mongo.MongoServiceFactory;
-import com.fiestacabin.dropwizard.quartz.SchedulerConfiguration;
-import com.google.inject.Injector;
-import com.google.inject.Provides;
-import com.google.inject.Singleton;
-import io.dropwizard.setup.Environment;
-import org.quartz.Scheduler;
-import org.quartz.SchedulerException;
-import org.quartz.impl.StdSchedulerFactory;
-
-import static com.google.inject.matcher.Matchers.annotatedWith;
-import static com.google.inject.matcher.Matchers.any;
-
-public class AwsSelfServiceModule extends CloudModule {
-
-	private static final String MONGO_URI_FORMAT = "mongodb://%s:%s@%s:%d/%s";
-	private static final String QUARTZ_MONGO_URI_PROPERTY = "org.quartz.jobStore.mongoUri";
-	private static final String QUARTZ_DB_NAME = "org.quartz.jobStore.dbName";
-
-	@Override
-	protected void configure() {
-		bind(BillingService.class).to(AwsBillingService.class);
-		bind((KeyDAO.class)).to(AwsKeyDao.class);
-		bind(InfrastructureInfoService.class).to(AwsInfrastructureInfoService.class);
-		bind(SchedulerConfiguration.class).toInstance(
-				new SchedulerConfiguration(SelfServiceApplication.class.getPackage().getName()));
-		bind(InfrastructureTemplateService.class).to(AwsInfrastructureTemplateService.class);
-		bind(BillingDAO.class).to(AwsBillingDAO.class);
-		final BudgetLimitInterceptor budgetLimitInterceptor = new BudgetLimitInterceptor();
-		requestInjection(budgetLimitInterceptor);
-		bindInterceptor(any(), annotatedWith(BudgetLimited.class), budgetLimitInterceptor);
-	}
-
-	@Override
-	public void init(Environment environment, Injector injector) {
-		environment.jersey().register(injector.getInstance(EdgeCallbackAws.class));
-		environment.jersey().register(injector.getInstance(KeyUploaderCallbackAws.class));
-		environment.jersey().register(injector.getInstance(ComputationalResourceAws.class));
-		environment.jersey().register(injector.getInstance(BillingResourceAws.class));
-
-		/*injector.getInstance(SecurityFactory.class).configure(injector, environment,
-				SelfServiceSecurityAuthenticator.class, injector.getInstance(Authorizer.class));*/
-	}
-
-
-	@Provides
-	@Singleton
-	Scheduler provideScheduler(SelfServiceApplicationConfiguration configuration) throws SchedulerException {
-		final MongoServiceFactory mongoFactory = configuration.getMongoFactory();
-		final String database = mongoFactory.getDatabase();
-		final String mongoUri = String.format(MONGO_URI_FORMAT, mongoFactory.getUsername(), mongoFactory.getPassword(),
-				mongoFactory.getHost(), mongoFactory.getPort(), database);
-		System.setProperty(QUARTZ_MONGO_URI_PROPERTY, mongoUri);
-		System.setProperty(QUARTZ_DB_NAME, database);
-		return StdSchedulerFactory.getDefaultScheduler();
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AzureSelfServiceModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AzureSelfServiceModule.java
deleted file mode 100644
index f1b62e1..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AzureSelfServiceModule.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.modules;
-
-import com.epam.dlab.backendapi.SelfServiceApplication;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.annotation.BudgetLimited;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.dao.azure.AzureBillingDAO;
-import com.epam.dlab.backendapi.dao.azure.AzureKeyDao;
-import com.epam.dlab.backendapi.interceptor.BudgetLimitInterceptor;
-import com.epam.dlab.backendapi.resources.azure.BillingResourceAzure;
-import com.epam.dlab.backendapi.resources.azure.ComputationalResourceAzure;
-import com.epam.dlab.backendapi.resources.callback.azure.EdgeCallbackAzure;
-import com.epam.dlab.backendapi.resources.callback.azure.KeyUploaderCallbackAzure;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.service.InfrastructureInfoService;
-import com.epam.dlab.backendapi.service.InfrastructureTemplateService;
-import com.epam.dlab.backendapi.service.azure.AzureBillingService;
-import com.epam.dlab.backendapi.service.azure.AzureInfrastructureInfoService;
-import com.epam.dlab.backendapi.service.azure.AzureInfrastructureTemplateService;
-import com.epam.dlab.cloud.CloudModule;
-import com.epam.dlab.mongo.MongoServiceFactory;
-import com.fiestacabin.dropwizard.quartz.SchedulerConfiguration;
-import com.google.inject.Injector;
-import com.google.inject.Provides;
-import com.google.inject.Singleton;
-import io.dropwizard.setup.Environment;
-import lombok.extern.slf4j.Slf4j;
-import org.quartz.Scheduler;
-import org.quartz.SchedulerException;
-import org.quartz.impl.StdSchedulerFactory;
-
-import static com.google.inject.matcher.Matchers.annotatedWith;
-import static com.google.inject.matcher.Matchers.any;
-
-@Slf4j
-public class AzureSelfServiceModule extends CloudModule {
-
-	private static final String MONGO_URI_FORMAT = "mongodb://%s:%s@%s:%d/%s";
-	private static final String QUARTZ_MONGO_URI_PROPERTY = "org.quartz.jobStore.mongoUri";
-	private static final String QUARTZ_DB_NAME = "org.quartz.jobStore.dbName";
-	private boolean useLdap;
-	private long maxSessionDurabilityMilliseconds;
-
-	public AzureSelfServiceModule(boolean useLdap, long maxSessionDurabilityMilliseconds) {
-		this.useLdap = useLdap;
-		this.maxSessionDurabilityMilliseconds = maxSessionDurabilityMilliseconds;
-	}
-
-	@Override
-	protected void configure() {
-		bind(BillingService.class).to(AzureBillingService.class);
-		bind((KeyDAO.class)).to(AzureKeyDao.class);
-		bind(InfrastructureInfoService.class).to(AzureInfrastructureInfoService.class);
-		bind(SchedulerConfiguration.class).toInstance(
-				new SchedulerConfiguration(SelfServiceApplication.class.getPackage().getName()));
-		bind(InfrastructureTemplateService.class).to(AzureInfrastructureTemplateService.class);
-		bind(BillingDAO.class).to(AzureBillingDAO.class);
-		final BudgetLimitInterceptor budgetLimitInterceptor = new BudgetLimitInterceptor();
-		requestInjection(budgetLimitInterceptor);
-		bindInterceptor(any(), annotatedWith(BudgetLimited.class), budgetLimitInterceptor);
-	}
-
-	@Override
-	public void init(Environment environment, Injector injector) {
-		environment.jersey().register(injector.getInstance(EdgeCallbackAzure.class));
-		environment.jersey().register(injector.getInstance(KeyUploaderCallbackAzure.class));
-		environment.jersey().register(injector.getInstance(ComputationalResourceAzure.class));
-		environment.jersey().register(injector.getInstance(BillingResourceAzure.class));
-
-	}
-
-	@Provides
-	@Singleton
-	Scheduler provideScheduler(SelfServiceApplicationConfiguration configuration) throws SchedulerException {
-		final MongoServiceFactory mongoFactory = configuration.getMongoFactory();
-		final String database = mongoFactory.getDatabase();
-		final String mongoUri = String.format(MONGO_URI_FORMAT, mongoFactory.getUsername(), mongoFactory.getPassword(),
-				mongoFactory.getHost(), mongoFactory.getPort(), database);
-		System.setProperty(QUARTZ_MONGO_URI_PROPERTY, mongoUri);
-		System.setProperty(QUARTZ_DB_NAME, database);
-		return StdSchedulerFactory.getDefaultScheduler();
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/GcpSelfServiceModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/CloudProviderModule.java
similarity index 64%
rename from services/self-service/src/main/java/com/epam/dlab/backendapi/modules/GcpSelfServiceModule.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/modules/CloudProviderModule.java
index 90c3fde..8b41baf 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/GcpSelfServiceModule.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/CloudProviderModule.java
@@ -21,23 +21,21 @@
 
 import com.epam.dlab.backendapi.SelfServiceApplication;
 import com.epam.dlab.backendapi.annotation.BudgetLimited;
+import com.epam.dlab.backendapi.annotation.ProjectAdmin;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.dao.gcp.GcpBillingDao;
-import com.epam.dlab.backendapi.dao.gcp.GcpKeyDao;
 import com.epam.dlab.backendapi.interceptor.BudgetLimitInterceptor;
-import com.epam.dlab.backendapi.resources.callback.gcp.EdgeCallbackGcp;
-import com.epam.dlab.backendapi.resources.callback.gcp.KeyUploaderCallbackGcp;
-import com.epam.dlab.backendapi.resources.gcp.BillingResourceGcp;
+import com.epam.dlab.backendapi.interceptor.ProjectAdminInterceptor;
+import com.epam.dlab.backendapi.resources.BillingResource;
+import com.epam.dlab.backendapi.resources.aws.ComputationalResourceAws;
+import com.epam.dlab.backendapi.resources.azure.ComputationalResourceAzure;
 import com.epam.dlab.backendapi.resources.gcp.ComputationalResourceGcp;
 import com.epam.dlab.backendapi.resources.gcp.GcpOauthResource;
 import com.epam.dlab.backendapi.service.BillingService;
 import com.epam.dlab.backendapi.service.InfrastructureInfoService;
 import com.epam.dlab.backendapi.service.InfrastructureTemplateService;
-import com.epam.dlab.backendapi.service.gcp.GcpBillingService;
-import com.epam.dlab.backendapi.service.gcp.GcpInfrastructureInfoService;
-import com.epam.dlab.backendapi.service.gcp.GcpInfrastructureTemplateService;
+import com.epam.dlab.backendapi.service.impl.BillingServiceImpl;
+import com.epam.dlab.backendapi.service.impl.InfrastructureInfoServiceImpl;
+import com.epam.dlab.backendapi.service.impl.InfrastructureTemplateServiceImpl;
 import com.epam.dlab.cloud.CloudModule;
 import com.epam.dlab.mongo.MongoServiceFactory;
 import com.fiestacabin.dropwizard.quartz.SchedulerConfiguration;
@@ -52,38 +50,43 @@
 import static com.google.inject.matcher.Matchers.annotatedWith;
 import static com.google.inject.matcher.Matchers.any;
 
-public class GcpSelfServiceModule extends CloudModule {
+public class CloudProviderModule extends CloudModule {
 
     private static final String MONGO_URI_FORMAT = "mongodb://%s:%s@%s:%d/%s";
     private static final String QUARTZ_MONGO_URI_PROPERTY = "org.quartz.jobStore.mongoUri";
     private static final String QUARTZ_DB_NAME = "org.quartz.jobStore.dbName";
 
-    @Override
-    @SuppressWarnings("unchecked")
-    public void init(Environment environment, Injector injector) {
+    private SelfServiceApplicationConfiguration configuration;
 
-		environment.jersey().register(injector.getInstance(EdgeCallbackGcp.class));
-		environment.jersey().register(injector.getInstance(KeyUploaderCallbackGcp.class));
-		environment.jersey().register(injector.getInstance(ComputationalResourceGcp.class));
-		environment.jersey().register(injector.getInstance(BillingResourceGcp.class));
-		if (injector.getInstance(SelfServiceApplicationConfiguration.class).isGcpOuauth2AuthenticationEnabled()) {
-			environment.jersey().register(injector.getInstance(GcpOauthResource.class));
-		}
-
+    public CloudProviderModule(SelfServiceApplicationConfiguration configuration) {
+        this.configuration = configuration;
     }
 
     @Override
     protected void configure() {
-        bind(BillingService.class).to(GcpBillingService.class);
-        bind((KeyDAO.class)).to(GcpKeyDao.class);
-        bind(InfrastructureInfoService.class).to(GcpInfrastructureInfoService.class);
-        bind(InfrastructureTemplateService.class).to(GcpInfrastructureTemplateService.class);
-        bind(BillingDAO.class).to(GcpBillingDao.class);
+        bind(BillingService.class).to(BillingServiceImpl.class);
+        bind(InfrastructureInfoService.class).to(InfrastructureInfoServiceImpl.class);
+        bind(InfrastructureTemplateService.class).to(InfrastructureTemplateServiceImpl.class);
         bind(SchedulerConfiguration.class).toInstance(
                 new SchedulerConfiguration(SelfServiceApplication.class.getPackage().getName()));
+
         final BudgetLimitInterceptor budgetLimitInterceptor = new BudgetLimitInterceptor();
+        final ProjectAdminInterceptor projectAdminInterceptor = new ProjectAdminInterceptor();
         requestInjection(budgetLimitInterceptor);
+        requestInjection(projectAdminInterceptor);
         bindInterceptor(any(), annotatedWith(BudgetLimited.class), budgetLimitInterceptor);
+        bindInterceptor(any(), annotatedWith(ProjectAdmin.class), projectAdminInterceptor);
+    }
+
+    @Override
+    public void init(Environment environment, Injector injector) {
+        environment.jersey().register(injector.getInstance(BillingResource.class));
+        environment.jersey().register(injector.getInstance(ComputationalResourceAws.class));
+        environment.jersey().register(injector.getInstance(ComputationalResourceAzure.class));
+        environment.jersey().register(injector.getInstance(ComputationalResourceGcp.class));
+        if (injector.getInstance(SelfServiceApplicationConfiguration.class).isGcpOuauth2AuthenticationEnabled()) {
+            environment.jersey().register(injector.getInstance(GcpOauthResource.class));
+        }
     }
 
     @Provides
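
// --- Illustrative sketch, not part of the patch: the Guice AOP pattern used by
// CloudProviderModule above, in isolation. requestInjection() populates the
// @Inject fields of the manually constructed interceptor (its ProjectService),
// and bindInterceptor(any(), annotatedWith(ProjectAdmin.class), ...) weaves it
// around every matching method. Note that Guice method interception applies
// only to injector-created instances and to non-private, non-final methods.

import com.epam.dlab.backendapi.annotation.ProjectAdmin;
import com.epam.dlab.backendapi.interceptor.ProjectAdminInterceptor;
import com.google.inject.AbstractModule;

import static com.google.inject.matcher.Matchers.annotatedWith;
import static com.google.inject.matcher.Matchers.any;

public class InterceptorWiringSketch extends AbstractModule {

    @Override
    protected void configure() {
        final ProjectAdminInterceptor projectAdminInterceptor = new ProjectAdminInterceptor();
        requestInjection(projectAdminInterceptor); // fills the interceptor's @Inject ProjectService
        bindInterceptor(any(), annotatedWith(ProjectAdmin.class), projectAdminInterceptor);
    }
}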
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java
index 3594826..9275319 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java
@@ -23,9 +23,66 @@
 import com.epam.dlab.auth.contract.SecurityAPI;
 import com.epam.dlab.backendapi.auth.SelfServiceSecurityAuthorizer;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.*;
-import com.epam.dlab.backendapi.service.*;
-import com.epam.dlab.backendapi.service.impl.*;
+import com.epam.dlab.backendapi.dao.BackupDao;
+import com.epam.dlab.backendapi.dao.BackupDaoImpl;
+import com.epam.dlab.backendapi.dao.BaseBillingDAO;
+import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAOImpl;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDaoImpl;
+import com.epam.dlab.backendapi.dao.ProjectDAO;
+import com.epam.dlab.backendapi.dao.ProjectDAOImpl;
+import com.epam.dlab.backendapi.dao.UserGroupDao;
+import com.epam.dlab.backendapi.dao.UserGroupDaoImpl;
+import com.epam.dlab.backendapi.dao.UserRoleDao;
+import com.epam.dlab.backendapi.dao.UserRoleDaoImpl;
+import com.epam.dlab.backendapi.service.AccessKeyService;
+import com.epam.dlab.backendapi.service.ApplicationSettingService;
+import com.epam.dlab.backendapi.service.ApplicationSettingServiceImpl;
+import com.epam.dlab.backendapi.service.BackupService;
+import com.epam.dlab.backendapi.service.ComputationalService;
+import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.EnvironmentService;
+import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.ExternalLibraryService;
+import com.epam.dlab.backendapi.service.GitCredentialService;
+import com.epam.dlab.backendapi.service.GuacamoleService;
+import com.epam.dlab.backendapi.service.ImageExploratoryService;
+import com.epam.dlab.backendapi.service.InactivityService;
+import com.epam.dlab.backendapi.service.KeycloakService;
+import com.epam.dlab.backendapi.service.KeycloakServiceImpl;
+import com.epam.dlab.backendapi.service.LibraryService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.backendapi.service.ReuploadKeyService;
+import com.epam.dlab.backendapi.service.SchedulerJobService;
+import com.epam.dlab.backendapi.service.SecurityService;
+import com.epam.dlab.backendapi.service.SecurityServiceImpl;
+import com.epam.dlab.backendapi.service.SystemInfoService;
+import com.epam.dlab.backendapi.service.TagService;
+import com.epam.dlab.backendapi.service.TagServiceImpl;
+import com.epam.dlab.backendapi.service.UserGroupService;
+import com.epam.dlab.backendapi.service.UserRoleService;
+import com.epam.dlab.backendapi.service.UserRoleServiceImpl;
+import com.epam.dlab.backendapi.service.UserSettingService;
+import com.epam.dlab.backendapi.service.UserSettingServiceImpl;
+import com.epam.dlab.backendapi.service.impl.AccessKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.BackupServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ComputationalServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EndpointServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EnvironmentServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GitCredentialServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GuacamoleServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ImageExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.InactivityServiceImpl;
+import com.epam.dlab.backendapi.service.impl.LibraryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.MavenCentralLibraryService;
+import com.epam.dlab.backendapi.service.impl.ProjectServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ReuploadKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SystemInfoServiceImpl;
+import com.epam.dlab.backendapi.service.impl.UserGroupServiceImpl;
 import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.mongo.MongoService;
 import com.epam.dlab.rest.client.RESTService;
@@ -75,6 +132,9 @@
 		bind(RESTService.class).annotatedWith(Names.named(ServiceConsts.PROVISIONING_SERVICE_NAME))
 				.toInstance(configuration.getProvisioningFactory()
 						.build(environment, ServiceConsts.PROVISIONING_SERVICE_NAME));
+		bind(RESTService.class).annotatedWith(Names.named(ServiceConsts.BILLING_SERVICE_NAME))
+				.toInstance(configuration.getBillingFactory()
+						.build(environment, ServiceConsts.BILLING_SERVICE_NAME));
 		bind(ImageExploratoryService.class).to(ImageExploratoryServiceImpl.class);
 		bind(ImageExploratoryDao.class).to(ImageExploratoryDaoImpl.class);
 		bind(BackupService.class).to(BackupServiceImpl.class);
@@ -89,9 +149,7 @@
 		bind(LibraryService.class).to(LibraryServiceImpl.class);
 		bind(SchedulerJobService.class).to(SchedulerJobServiceImpl.class);
 		bind(EnvironmentService.class).to(EnvironmentServiceImpl.class);
-		bind(EdgeService.class).to(EdgeServiceImpl.class);
 		bind(ReuploadKeyService.class).to(ReuploadKeyServiceImpl.class);
-		bind(UserResourceService.class).to(UserResourceServiceImpl.class);
 		bind(RESTService.class).annotatedWith(Names.named(ServiceConsts.MAVEN_SEARCH_API))
 				.toInstance(configuration.getMavenApiFactory().build(environment, ServiceConsts.MAVEN_SEARCH_API));
 
@@ -108,6 +166,7 @@
 		bind(EndpointDAO.class).to(EndpointDAOImpl.class);
 		bind(ProjectService.class).to(ProjectServiceImpl.class);
 		bind(ProjectDAO.class).to(ProjectDAOImpl.class);
+		bind(BillingDAO.class).to(BaseBillingDAO.class);
 	}
 
 	private void configureCors(Environment environment) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java
index 06ac774..eb8d3bc 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java
@@ -46,16 +46,6 @@
 	}
 
 	public static CloudModule getCloudProviderModule(SelfServiceApplicationConfiguration configuration) {
-		switch (configuration.getCloudProvider()) {
-			case AWS:
-				return new AwsSelfServiceModule();
-			case AZURE:
-				return new AzureSelfServiceModule(configuration.isAzureUseLdap(),
-						configuration.getMaxSessionDurabilityMilliseconds());
-			case GCP:
-				return new GcpSelfServiceModule();
-			default:
-				throw new UnsupportedOperationException("Unsupported cloud provider " + configuration.getCloudProvider());
-		}
+		return new CloudProviderModule(configuration);
 	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java
index f66487a..d20adbf 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java
@@ -20,11 +20,68 @@
 package com.epam.dlab.backendapi.modules;
 
 import com.epam.dlab.ModuleBase;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.auth.SelfServiceSecurityAuthorizer;
-import com.epam.dlab.backendapi.dao.*;
-import com.epam.dlab.backendapi.service.*;
-import com.epam.dlab.backendapi.service.impl.*;
+import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
+import com.epam.dlab.backendapi.dao.BackupDao;
+import com.epam.dlab.backendapi.dao.BackupDaoImpl;
+import com.epam.dlab.backendapi.dao.BaseBillingDAO;
+import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAOImpl;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDaoImpl;
+import com.epam.dlab.backendapi.dao.ProjectDAO;
+import com.epam.dlab.backendapi.dao.ProjectDAOImpl;
+import com.epam.dlab.backendapi.dao.UserGroupDao;
+import com.epam.dlab.backendapi.dao.UserGroupDaoImpl;
+import com.epam.dlab.backendapi.dao.UserRoleDao;
+import com.epam.dlab.backendapi.dao.UserRoleDaoImpl;
+import com.epam.dlab.backendapi.service.AccessKeyService;
+import com.epam.dlab.backendapi.service.ApplicationSettingService;
+import com.epam.dlab.backendapi.service.ApplicationSettingServiceImpl;
+import com.epam.dlab.backendapi.service.BackupService;
+import com.epam.dlab.backendapi.service.ComputationalService;
+import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.EnvironmentService;
+import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.ExternalLibraryService;
+import com.epam.dlab.backendapi.service.GitCredentialService;
+import com.epam.dlab.backendapi.service.GuacamoleService;
+import com.epam.dlab.backendapi.service.ImageExploratoryService;
+import com.epam.dlab.backendapi.service.InactivityService;
+import com.epam.dlab.backendapi.service.KeycloakService;
+import com.epam.dlab.backendapi.service.KeycloakServiceImpl;
+import com.epam.dlab.backendapi.service.LibraryService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.backendapi.service.ReuploadKeyService;
+import com.epam.dlab.backendapi.service.SchedulerJobService;
+import com.epam.dlab.backendapi.service.SecurityService;
+import com.epam.dlab.backendapi.service.SecurityServiceImpl;
+import com.epam.dlab.backendapi.service.SystemInfoService;
+import com.epam.dlab.backendapi.service.TagService;
+import com.epam.dlab.backendapi.service.TagServiceImpl;
+import com.epam.dlab.backendapi.service.UserGroupService;
+import com.epam.dlab.backendapi.service.UserRoleService;
+import com.epam.dlab.backendapi.service.UserRoleServiceImpl;
+import com.epam.dlab.backendapi.service.UserSettingService;
+import com.epam.dlab.backendapi.service.UserSettingServiceImpl;
+import com.epam.dlab.backendapi.service.impl.AccessKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.BackupServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ComputationalServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EndpointServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EnvironmentServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GitCredentialServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GuacamoleServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ImageExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.InactivityServiceImpl;
+import com.epam.dlab.backendapi.service.impl.LibraryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.MavenCentralLibraryService;
+import com.epam.dlab.backendapi.service.impl.ProjectServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ReuploadKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SystemInfoServiceImpl;
+import com.epam.dlab.backendapi.service.impl.UserGroupServiceImpl;
 import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.mongo.MongoService;
 import com.epam.dlab.rest.client.RESTService;
@@ -66,6 +123,9 @@
 		bind(RESTService.class).annotatedWith(Names.named(ServiceConsts.PROVISIONING_SERVICE_NAME))
 				.toInstance(configuration.getProvisioningFactory().build(environment, ServiceConsts
 						.PROVISIONING_SERVICE_NAME));
+		bind(RESTService.class).annotatedWith(Names.named(ServiceConsts.BILLING_SERVICE_NAME))
+				.toInstance(configuration.getBillingFactory()
+						.build(environment, ServiceConsts.BILLING_SERVICE_NAME));
 		bind(ImageExploratoryService.class).to(ImageExploratoryServiceImpl.class);
 		bind(ImageExploratoryDao.class).to(ImageExploratoryDaoImpl.class);
 		bind(BackupService.class).to(BackupServiceImpl.class);
@@ -78,9 +138,7 @@
 		bind(LibraryService.class).to(LibraryServiceImpl.class);
 		bind(SchedulerJobService.class).to(SchedulerJobServiceImpl.class);
 		bind(EnvironmentService.class).to(EnvironmentServiceImpl.class);
-		bind(EdgeService.class).to(EdgeServiceImpl.class);
 		bind(ReuploadKeyService.class).to(ReuploadKeyServiceImpl.class);
-		bind(UserResourceService.class).to(UserResourceServiceImpl.class);
 		bind(RESTService.class).annotatedWith(Names.named(ServiceConsts.MAVEN_SEARCH_API))
 				.toInstance(configuration.getMavenApiFactory().build(environment, ServiceConsts.MAVEN_SEARCH_API));
 		bind(ExternalLibraryService.class).to(MavenCentralLibraryService.class);
@@ -97,6 +155,7 @@
 		bind(EndpointDAO.class).to(EndpointDAOImpl.class);
 		bind(ProjectService.class).to(ProjectServiceImpl.class);
 		bind(ProjectDAO.class).to(ProjectDAOImpl.class);
+		bind(BillingDAO.class).to(BaseBillingDAO.class);
 		bind(TagService.class).to(TagServiceImpl.class);
 		bind(SecurityService.class).to(SecurityServiceImpl.class);
 		bind(KeycloakService.class).to(KeycloakServiceImpl.class);
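
// --- Illustrative sketch, not part of the patch: consuming the billing client
// that both DevModule and ProductionModule now bind under
// ServiceConsts.BILLING_SERVICE_NAME. The consumer class below is hypothetical;
// the named-binding mechanics are standard Guice.

import com.epam.dlab.constants.ServiceConsts;
import com.epam.dlab.rest.client.RESTService;
import com.google.inject.Inject;
import com.google.inject.name.Named;

public class BillingClientConsumerSketch {

    private final RESTService billingClient;

    @Inject
    public BillingClientConsumerSketch(@Named(ServiceConsts.BILLING_SERVICE_NAME) RESTService billingClient) {
        this.billingClient = billingClient;
    }
}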
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/BillingResourceAzure.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/BillingResource.java
similarity index 61%
rename from services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/BillingResourceAzure.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/resources/BillingResource.java
index e9169fd..1916a38 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/BillingResourceAzure.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/BillingResource.java
@@ -17,14 +17,13 @@
  * under the License.
  */
 
-package com.epam.dlab.backendapi.resources.azure;
+package com.epam.dlab.backendapi.resources;
 
 import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.resources.dto.azure.AzureBillingFilter;
+import com.epam.dlab.backendapi.resources.dto.BillingFilter;
 import com.epam.dlab.backendapi.service.BillingService;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
-import org.bson.Document;
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
@@ -36,45 +35,30 @@
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
-/**
- * Provides API to get or export billing information.
- */
 @Path("/billing")
 @Consumes(MediaType.APPLICATION_JSON)
-public class BillingResourceAzure {
+public class BillingResource {
+
+    private final BillingService billingService;
 
     @Inject
-    private BillingService billingService;
+    public BillingResource(BillingService billingService) {
+        this.billingService = billingService;
+    }
 
-    /**
-     * Returns the billing report.
-     *
-     * @param userInfo user info.
-     * @param filter   filter for billing data.
-     */
     @POST
     @Path("/report")
     @Produces(MediaType.APPLICATION_JSON)
-    @SuppressWarnings("unchecked")
-    public Document getBillingReport(@Auth UserInfo userInfo, @Valid @NotNull AzureBillingFilter filter) {
-        return billingService.getBillingReport(userInfo, filter);
+    public Response getBillingReport(@Auth UserInfo userInfo, @Valid @NotNull BillingFilter filter) {
+        return Response.ok(billingService.getBillingReport(userInfo, filter)).build();
     }
 
-    /**
-     * Returns the billing report in csv file.
-     *
-     * @param userInfo user info.
-     * @param filter   filter for report data.
-     */
-
     @POST
     @Path("/report/download")
     @Produces(MediaType.APPLICATION_OCTET_STREAM)
-    @SuppressWarnings("unchecked")
-    public Response downloadBillingReport(@Auth UserInfo userInfo, @Valid @NotNull AzureBillingFilter filter) {
+    public Response downloadBillingReport(@Auth UserInfo userInfo, @Valid @NotNull BillingFilter filter) {
         return Response.ok(billingService.downloadReport(userInfo, filter))
-                .header(HttpHeaders.CONTENT_DISPOSITION,
-                        "attachment; filename=\"" + billingService.getReportFileName(userInfo, filter) + "\"")
+                .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"billing-report.csv\"")
                 .build();
     }
-}
\ No newline at end of file
+}
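
// --- Illustrative sketch, not part of the patch: exercising the unified
// /billing/report endpoint with the JAX-RS client API. The base URL, the auth
// scheme, and the empty JSON body standing in for a serialized BillingFilter
// are all assumptions, not taken from this patch.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class BillingReportClientSketch {

    public static void main(String[] args) {
        Client client = ClientBuilder.newClient();
        Response response = client
                .target("https://localhost:8443/api/billing/report") // base URL assumed
                .request(MediaType.APPLICATION_JSON)
                .header(HttpHeaders.AUTHORIZATION, "Bearer <access-token>") // auth scheme assumed
                .post(Entity.json("{}")); // stand-in for a serialized BillingFilter
        System.out.println(response.readEntity(String.class));
        client.close();
    }
}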
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EdgeResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EdgeResource.java
deleted file mode 100644
index 3ae31b3..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EdgeResource.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.service.EdgeService;
-import com.epam.dlab.rest.contracts.EdgeAPI;
-import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
-import lombok.extern.slf4j.Slf4j;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-
-/**
- * Provides the REST API to manage(start/stop) edge node
- */
-@Path("/infrastructure/edge")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class EdgeResource implements EdgeAPI {
-
-	private final EdgeService edgeService;
-
-	@Inject
-	public EdgeResource(EdgeService edgeService) {
-		this.edgeService = edgeService;
-	}
-
-	/**
-	 * Starts EDGE node for user.
-	 *
-	 * @param userInfo user info.
-	 * @return Request Id.
-	 */
-	@POST
-	@Path("/start")
-	public String start(@Auth UserInfo userInfo) {
-		return edgeService.start(userInfo);
-	}
-
-	/**
-	 * Stop EDGE node for user.
-	 *
-	 * @param userInfo user info.
-	 * @return Request Id.
-	 */
-	@POST
-	@Path("/stop")
-	public String stop(@Auth UserInfo userInfo) {
-		return edgeService.stop(userInfo);
-	}
-}
\ No newline at end of file
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EndpointResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EndpointResource.java
index f1e981d..3b49b42 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EndpointResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EndpointResource.java
@@ -21,6 +21,7 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.EndpointResourcesDTO;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.rest.dto.ErrorDTO;
 import com.google.inject.Inject;
@@ -34,6 +35,7 @@
 import io.swagger.v3.oas.annotations.responses.ApiResponses;
 
 import javax.annotation.security.RolesAllowed;
+import javax.validation.Valid;
 import javax.ws.rs.*;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
@@ -69,8 +71,8 @@
 	})
 	@Consumes(MediaType.APPLICATION_JSON)
 	@POST
-	public Response createEndpoint(@Parameter(hidden = true) @Auth UserInfo userInfo, EndpointDTO endpointDTO) {
-		endpointService.create(endpointDTO);
+	public Response createEndpoint(@Parameter(hidden = true) @Auth UserInfo userInfo, @Valid EndpointDTO endpointDTO) {
+		endpointService.create(userInfo, endpointDTO);
 		final URI uri = uriInfo.getRequestUriBuilder().path(endpointDTO.getName()).build();
 		return Response
 				.ok()
@@ -108,6 +110,20 @@
 		return Response.ok(endpointService.getEndpoints()).build();
 	}
 
+	@Operation(summary = "Get resources related to the endpoint", tags = "endpoint")
+	@ApiResponses({
+			@ApiResponse(responseCode = "200", description = "Returns information about the resources of the endpoint",
+					content = @Content(mediaType = MediaType.APPLICATION_JSON, schema =
+					@Schema(implementation = EndpointResourcesDTO.class)))
+	})
+	@GET
+	@Path("{name}/resources")
+	@Produces(MediaType.APPLICATION_JSON)
+	public Response getEndpointResources(@Parameter(hidden = true) @Auth UserInfo userInfo,
+										 @Parameter(description = "Endpoint name")
+										 @PathParam("name") String name) {
+		return Response.ok(endpointService.getEndpointResources(name)).build();
+	}
 
 	@Operation(summary = "Remove endpoint", tags = "endpoint")
 	@ApiResponses({
@@ -120,8 +136,25 @@
 	@Path("{name}")
 	public Response removeEndpoint(@Parameter(hidden = true) @Auth UserInfo userInfo,
 								   @Parameter(description = "Endpoint name")
-								   @PathParam("name") String name) {
-		endpointService.remove(name);
+								   @PathParam("name") String name,
+								   @Parameter(description = "Whether to delete related resources together with the endpoint")
+								   @QueryParam("with-resources") @DefaultValue("false") boolean withResources) {
+		endpointService.remove(userInfo, name, withResources);
+		return Response.ok().build();
+	}
+
+	@Operation(summary = "Check whether endpoint url is valid", tags = "endpoint")
+	@ApiResponses({
+			@ApiResponse(responseCode = "200", description = "Valid endpoint url"),
+			@ApiResponse(responseCode = "404", description = "Endpoint url is not valid"),
+	})
+	@GET
+	@Path("url/{url}")
+	@Produces(MediaType.APPLICATION_JSON)
+	public Response checkEndpointUrl(@Parameter(hidden = true) @Auth UserInfo userInfo,
+									 @Parameter(description = "Endpoint url")
+									 @PathParam("url") String url) {
+		endpointService.checkUrl(userInfo, url);
 		return Response.ok().build();
 	}
 }
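
// --- Illustrative sketch, not part of the patch: calling the new
// GET /endpoint/url/{url} check. Because the endpoint URL travels inside a path
// segment, it has to be percent-encoded first; otherwise its slashes would be
// read as path separators. Base URL and sample values are assumptions.

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.Response;

public class EndpointUrlCheckSketch {

    public static void main(String[] args) throws UnsupportedEncodingException {
        String encodedUrl = URLEncoder.encode("https://endpoint.example.com:8084/", "UTF-8");
        Client client = ClientBuilder.newClient();
        Response response = client
                .target("https://localhost:8443/api/endpoint/url/" + encodedUrl) // base URL assumed
                .request()
                .get();
        // 200 means the URL answered; 404 means the check failed
        // (see the @ApiResponses above).
        System.out.println(response.getStatus());
        client.close();
    }
}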
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
index 313947e..3553ff4 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
@@ -27,7 +27,12 @@
 import org.hibernate.validator.constraints.NotEmpty;
 
 import javax.annotation.security.RolesAllowed;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
@@ -44,98 +49,62 @@
 	}
 
 	@GET
-	@Path("user")
-	@Produces(MediaType.APPLICATION_JSON)
-	public Response getUsersWithActiveEnv(@Auth UserInfo userInfo) {
-		log.debug("User {} requested information about active environments", userInfo.getName());
-		return Response.ok(environmentService.getUsers()).build();
-	}
-
-	@GET
 	@Path("all")
 	@Produces(MediaType.APPLICATION_JSON)
 	public Response getAllEnv(@Auth UserInfo userInfo) {
 		log.debug("Admin {} requested information about all user's environment", userInfo.getName());
-		return Response.ok(environmentService.getAllEnv()).build();
+		return Response.ok(environmentService.getAllEnv(userInfo)).build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("terminate")
-	public Response terminateEnv(@Auth UserInfo userInfo,
-								 @NotEmpty String user) {
-		log.info("User {} is terminating {} environment", userInfo.getName(), user);
-		environmentService.terminateEnvironment(user);
-		return Response.ok().build();
-	}
-
-	@POST
-	@Consumes(MediaType.TEXT_PLAIN)
-	@Produces(MediaType.APPLICATION_JSON)
-	@Path("stop")
-	public Response stopEnv(@Auth UserInfo userInfo,
-							@NotEmpty String user) {
-		log.info("User {} is stopping {} environment", userInfo.getName(), user);
-		environmentService.stopEnvironment(user);
-		return Response.ok().build();
-	}
-
-	@POST
-	@Consumes(MediaType.TEXT_PLAIN)
-	@Produces(MediaType.APPLICATION_JSON)
-	@Path("stop/edge")
-	public Response stopEdge(@Auth UserInfo userInfo, @NotEmpty String user) {
-		log.info("Admin {} is stopping edge of user {}", userInfo.getName(), user);
-		environmentService.stopEdge(user);
-		return Response.ok().build();
-	}
-
-	@POST
-	@Consumes(MediaType.TEXT_PLAIN)
-	@Produces(MediaType.APPLICATION_JSON)
-	@Path("stop/{exploratoryName}")
+	@Path("stop/{projectName}/{exploratoryName}")
 	public Response stopNotebook(@Auth UserInfo userInfo, @NotEmpty String user,
+								 @PathParam("projectName") String projectName,
 								 @PathParam("exploratoryName") String exploratoryName) {
 		log.info("Admin {} is stopping notebook {} of user {}", userInfo.getName(), exploratoryName, user);
-		environmentService.stopExploratory(user, exploratoryName);
+		environmentService.stopExploratory(userInfo, user, projectName, exploratoryName);
 		return Response.ok().build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("stop/{exploratoryName}/{computationalName}")
+	@Path("stop/{projectName}/{exploratoryName}/{computationalName}")
 	public Response stopCluster(@Auth UserInfo userInfo, @NotEmpty String user,
+								@PathParam("projectName") String projectName,
 								@PathParam("exploratoryName") String exploratoryName,
 								@PathParam("computationalName") String computationalName) {
 		log.info("Admin {} is stopping computational resource {} affiliated with exploratory {} of user {}",
 				userInfo.getName(), computationalName, exploratoryName, user);
-		environmentService.stopComputational(user, exploratoryName, computationalName);
+		environmentService.stopComputational(userInfo, user, projectName, exploratoryName, computationalName);
 		return Response.ok().build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("terminate/{exploratoryName}")
+	@Path("terminate/{projectName}/{exploratoryName}")
 	public Response terminateNotebook(@Auth UserInfo userInfo, @NotEmpty String user,
+									  @PathParam("projectName") String projectName,
 									  @PathParam("exploratoryName") String exploratoryName) {
 		log.info("Admin {} is terminating notebook {} of user {}", userInfo.getName(), exploratoryName, user);
-		environmentService.terminateExploratory(user, exploratoryName);
+		environmentService.terminateExploratory(userInfo, user, projectName, exploratoryName);
 		return Response.ok().build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("terminate/{exploratoryName}/{computationalName}")
+	@Path("terminate/{projectName}/{exploratoryName}/{computationalName}")
 	public Response terminateCluster(@Auth UserInfo userInfo, @NotEmpty String user,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
 		log.info("Admin {} is terminating computational resource {} affiliated with exploratory {} of user {}",
 				userInfo.getName(), computationalName, exploratoryName, user);
-		environmentService.terminateComputational(user, exploratoryName, computationalName);
+		environmentService.terminateComputational(userInfo, user, projectName, exploratoryName, computationalName);
 		return Response.ok().build();
 	}
 }
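
// --- Illustrative sketch, not part of the patch: the admin endpoints above now
// carry the project name in the path and still take the target user name as a
// raw text/plain body. The resource base path and sample values are assumptions.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.Response;

public class AdminStopNotebookSketch {

    public static void main(String[] args) {
        Client client = ClientBuilder.newClient();
        // POST .../stop/{projectName}/{exploratoryName}, body = target user name
        Response response = client
                .target("https://localhost:8443/api/environment/stop/projectA/notebook1") // path assumed
                .request()
                .post(Entity.text("someuser"));
        System.out.println(response.getStatus());
        client.close();
    }
}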
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
index 6fa0b51..7b29af1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
@@ -37,7 +37,14 @@
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -58,6 +65,10 @@
 		this.exploratoryService = exploratoryService;
 	}
 
+	@GET
+	public Response getExploratoryPopUp(@Auth UserInfo userInfo) {
+		return Response.ok(exploratoryService.getUserInstances(userInfo)).build();
+	}
 	/**
 	 * Creates the exploratory environment for user.
 	 *
@@ -94,7 +105,7 @@
 						@Valid @NotNull ExploratoryActionFormDTO formDTO) {
 		log.debug("Starting exploratory environment {} for user {}", formDTO.getNotebookInstanceName(),
 				userInfo.getName());
-		return exploratoryService.start(userInfo, formDTO.getNotebookInstanceName(), "");
+		return exploratoryService.start(userInfo, formDTO.getNotebookInstanceName(), formDTO.getProjectName());
 	}
 
 	/**
@@ -105,11 +116,12 @@
 	 * @return Invocation response as JSON string.
 	 */
 	@DELETE
-	@Path("/{name}/stop")
+	@Path("/{project}/{name}/stop")
 	public String stop(@Auth UserInfo userInfo,
+					   @PathParam("project") String project,
 					   @PathParam("name") String name) {
 		log.debug("Stopping exploratory environment {} for user {}", name, userInfo.getName());
-		return exploratoryService.stop(userInfo, name);
+		return exploratoryService.stop(userInfo, project, name);
 	}
 
 	/**
@@ -120,29 +132,32 @@
 	 * @return Invocation response as JSON string.
 	 */
 	@DELETE
-	@Path("/{name}/terminate")
+	@Path("/{project}/{name}/terminate")
 	public String terminate(@Auth UserInfo userInfo,
+							@PathParam("project") String project,
 							@PathParam("name") String name) {
 		log.debug("Terminating exploratory environment {} for user {}", name, userInfo.getName());
-		return exploratoryService.terminate(userInfo, name);
+		return exploratoryService.terminate(userInfo, project, name);
 	}
 
 	@PUT
-	@Path("/{name}/reconfigure")
+	@Path("/{project}/{name}/reconfigure")
 	public Response reconfigureSpark(@Auth UserInfo userInfo,
+									 @PathParam("project") String project,
 									 @PathParam("name") String name,
 									 List<ClusterConfig> config) {
 		log.debug("Updating exploratory {} spark cluster for user {}", name, userInfo.getName());
-		exploratoryService.updateClusterConfig(userInfo, name, config);
+		exploratoryService.updateClusterConfig(userInfo, project, name, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("/{name}/cluster/config")
+	@Path("/{project}/{name}/cluster/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("project") String project,
 									 @PathParam("name") String name) {
 		log.debug("Getting exploratory {} spark cluster configuration for user {}", name, userInfo.getName());
-		return Response.ok(exploratoryService.getClusterConfig(userInfo, name)).build();
+		return Response.ok(exploratoryService.getClusterConfig(userInfo, project, name)).build();
 	}
 
 	private Exploratory getExploratory(ExploratoryCreateFormDTO formDTO) {
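
// --- Illustrative sketch, not part of the patch: the exploratory operations
// above are now project-scoped, so stop/terminate/reconfigure moved from
// /{name}/... to /{project}/{name}/... The resource base path and sample values
// below are assumptions.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;

public class StopExploratorySketch {

    public static void main(String[] args) {
        Client client = ClientBuilder.newClient();
        // DELETE .../{project}/{name}/stop returns the provisioning request id
        String requestId = client
                .target("https://localhost:8443/api/infrastructure_provision/exploratory_environment") // assumed
                .path("projectA").path("notebook1").path("stop")
                .request()
                .delete(String.class);
        System.out.println(requestId);
        client.close();
    }
}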
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
index fdc15fd..f913e2b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
@@ -30,8 +30,18 @@
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
 import java.net.URI;
 import java.util.List;
 
@@ -58,8 +68,8 @@
 								@Valid @NotNull ExploratoryImageCreateFormDTO formDTO,
 								@Context UriInfo uriInfo) {
 		log.debug("Creating an image {} for user {}", formDTO, ui.getName());
-		String uuid = imageExploratoryService.createImage(ui, formDTO.getNotebookName(), formDTO.getName(), formDTO
-				.getDescription());
+		String uuid = imageExploratoryService.createImage(ui, formDTO.getProjectName(), formDTO.getNotebookName(),
+				formDTO.getName(), formDTO.getDescription());
 		requestId.put(ui.getName(), uuid);
 
 		final URI imageUri = UriBuilder.fromUri(uriInfo.getRequestUri())
@@ -90,8 +100,10 @@
 	@GET
 	@Path("{name}")
 	public Response getImage(@Auth UserInfo ui,
-							 @PathParam("name") String name) {
+							 @PathParam("name") String name,
+							 @QueryParam("project") String project,
+							 @QueryParam("endpoint") String endpoint) {
 		log.debug("Getting image with name {} for user {}", name, ui.getName());
-		return Response.ok(imageExploratoryService.getImage(ui.getName(), name)).build();
+		return Response.ok(imageExploratoryService.getImage(ui.getName(), name, project, endpoint)).build();
 	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java
index db8197f..c8952f3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java
@@ -22,13 +22,17 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
 import com.epam.dlab.backendapi.resources.dto.ProjectInfrastructureInfo;
-import com.epam.dlab.backendapi.roles.UserRoles;
 import com.epam.dlab.backendapi.service.InfrastructureInfoService;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
 
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -66,8 +70,7 @@
 	@Path("/status")
 	public HealthStatusPageDTO status(@Auth UserInfo userInfo,
 									  @QueryParam("full") @DefaultValue("0") int fullReport) {
-		return infrastructureInfoService
-				.getHeathStatus(userInfo, fullReport != 0, UserRoles.isAdmin(userInfo));
+		return infrastructureInfoService.getHeathStatus(userInfo, fullReport != 0);
 	}
 
 	/**
@@ -78,7 +81,7 @@
 	@GET
 	@Path("/info")
 	public List<ProjectInfrastructureInfo> getUserResources(@Auth UserInfo userInfo) {
-		return infrastructureInfoService.getUserResources(userInfo.getName());
+		return infrastructureInfoService.getUserResources(userInfo);
 
 	}
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/KeyUploaderResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/KeyUploaderResource.java
deleted file mode 100644
index a58f8dc..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/KeyUploaderResource.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.service.AccessKeyService;
-import com.epam.dlab.dto.keyload.KeyLoadStatus;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.exceptions.DlabValidationException;
-import com.epam.dlab.exceptions.ResourceNotFoundException;
-import com.epam.dlab.rest.contracts.EdgeAPI;
-import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
-import lombok.extern.slf4j.Slf4j;
-import org.glassfish.jersey.media.multipart.FormDataParam;
-
-import javax.ws.rs.*;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-
-/**
- * Provides the REST API for upload the user key.
- */
-@Path("/user/access_key")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class KeyUploaderResource implements EdgeAPI {
-
-	private static final String FILE_ATTACHMENT_FORMAT = "attachment; filename=\"%s.pem\"";
-	private AccessKeyService keyService;
-
-	@Inject
-	public KeyUploaderResource(AccessKeyService keyService) {
-		this.keyService = keyService;
-	}
-
-	/**
-	 * Finds and returns the status of the user key.
-	 *
-	 * @param userInfo user info.
-	 * @return <pre>
-	 * {@link Status#NOT_FOUND} the user key has been not found.
-	 * {@link Status#ACCEPTED} the user key is uploading now.
-	 * {@link Status#OK} the user key is valid.
-	 * {@link Status#INTERNAL_SERVER_ERROR} the check of the status is failed.
-	 * </pre>
-	 */
-	@GET
-	public Response checkKey(@Auth UserInfo userInfo) {
-		final KeyLoadStatus status = keyService.getUserKeyStatus(userInfo.getName());
-		if (KeyLoadStatus.NONE == status) {
-			throw new ResourceNotFoundException("Key for user " + userInfo.getName() + " not found");
-		} else if (KeyLoadStatus.ERROR == status) {
-			throw new DlabException("Key for user " + userInfo.getName() + " is in error state");
-		}
-		return Response.status(status.getHttpStatus()).build();
-	}
-
-	/**
-	 * Uploads/reuploads the user key to server. If param 'isPrimaryUploading' equals 'true', then it stores
-	 * the user key to the database and calls the post method of the provisioning service for the key uploading
-	 * and edge creating for user. Else if this param equals 'false', then only replacing keys in the database
-	 * will be performed (user's key will be reuploaded).
-	 *
-	 * @param userInfo           user info.
-	 * @param fileContent        content of the user key.
-	 * @param isPrimaryUploading true if key is being primarily uploaded, false - in case of reuploading
-	 * @return 200 Ok
-	 */
-	@POST
-	@Consumes(MediaType.MULTIPART_FORM_DATA)
-	public Response loadKey( @Auth UserInfo userInfo,
-							@FormDataParam("file") String fileContent,
-							@QueryParam("is_primary_uploading") @DefaultValue("true") boolean isPrimaryUploading) {
-
-		validate(fileContent);
-		keyService.uploadKey(userInfo, fileContent, isPrimaryUploading);
-		return Response.ok().build();
-	}
-
-	/**
-	 * Creates the EDGE node and upload the user key  for user.
-	 *
-	 * @param userInfo user info.
-	 * @return {@link Response.Status#OK} request for provisioning service has been accepted.<br>
-	 */
-	@POST
-	@Path("/recover")
-	public Response recover(@Auth UserInfo userInfo) {
-		return Response.ok(keyService.recoverEdge(userInfo)).build();
-	}
-
-
-	@POST
-	@Path("/generate")
-	@Produces(MediaType.APPLICATION_OCTET_STREAM)
-	public Response generate(@Auth UserInfo userInfo,
-							 @QueryParam("is_primary_uploading") @DefaultValue("true") boolean isPrimaryUploading) {
-		final Response.ResponseBuilder builder = Response.ok(keyService.generateKey(userInfo, isPrimaryUploading));
-		builder.header(HttpHeaders.CONTENT_DISPOSITION, String.format(FILE_ATTACHMENT_FORMAT, userInfo.getName()));
-		return builder.build();
-	}
-
-	private void validate(String publicKey) {
-		if (!publicKey.startsWith("ssh-")) {
-			log.error("Wrong key format. Key should be in openSSH format");
-			log.trace("Key content:\n{}", publicKey);
-			throw new DlabValidationException("Wrong key format. Key should be in openSSH format");
-		}
-	}
-}
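
The removed validate() method accepts any key whose text starts with "ssh-". A self-contained sketch of that check, using IllegalArgumentException as a stand-in for the project's DlabValidationException, might look like:

// Standalone sketch of the OpenSSH public-key prefix check performed by the
// removed validate() method. IllegalArgumentException stands in for
// DlabValidationException; the accepted prefix mirrors the code above.
public final class OpenSshKeyCheck {

	private OpenSshKeyCheck() {
	}

	public static void validate(String publicKey) {
		// Accepts key-type tokens such as "ssh-rsa" or "ssh-ed25519",
		// exactly like the removed method; anything else is rejected.
		if (publicKey == null || !publicKey.startsWith("ssh-")) {
			throw new IllegalArgumentException("Wrong key format. Key should be in openSSH format");
		}
	}

	public static void main(String[] args) {
		validate("ssh-rsa AAAAB3NzaC1yc2E... user@host"); // passes silently
	}
}
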
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
index 64ede19..841ed73 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
@@ -41,7 +41,12 @@
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -81,6 +86,7 @@
 	@GET
 	@Path("/lib_groups")
 	public Iterable<String> getLibGroupList(@Auth UserInfo userInfo,
+											@QueryParam("project_name") @NotBlank String projectName,
 											@QueryParam("exploratory_name") @NotBlank String exploratoryName,
 											@QueryParam("computational_name") String computationalName) {
 
@@ -88,11 +94,11 @@
 				exploratoryName, computationalName);
 		try {
 			if (StringUtils.isEmpty(computationalName)) {
-				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), projectName,
 						exploratoryName);
 				return ExploratoryLibCache.getCache().getLibGroupList(userInfo, userInstance);
 			} else {
-				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), projectName,
 						exploratoryName, computationalName);
 
 				userInstance.setResources(userInstance.getResources().stream()
@@ -120,13 +126,14 @@
 	@GET
 	@Path("/lib_list")
 	public List<Document> getLibList(@Auth UserInfo userInfo,
+									 @QueryParam("project_name") @NotBlank String projectName,
 									 @QueryParam("exploratory_name") @NotBlank String exploratoryName,
 									 @QueryParam("computational_name") String computationalName) {
 
 		log.debug("Loading list of libraries for user {} and exploratory {} and computational {}", userInfo.getName(),
 				exploratoryName, computationalName);
 		try {
-			return libraryService.getLibs(userInfo.getName(), exploratoryName, computationalName);
+			return libraryService.getLibs(userInfo.getName(), projectName, exploratoryName, computationalName);
 
 		} catch (Exception t) {
 			log.error("Cannot load installed libraries for user {} and exploratory {} an", userInfo.getName(),
@@ -147,14 +154,14 @@
 	 */
 	@GET
 	@Path("/lib_list/formatted")
-
 	public List<LibInfoRecord> getLibListFormatted(@Auth UserInfo userInfo,
+												   @QueryParam("project_name") @NotBlank String projectName,
 												   @QueryParam("exploratory_name") @NotBlank String exploratoryName) {
 
 		log.debug("Loading formatted list of libraries for user {} and exploratory {}", userInfo.getName(),
 				exploratoryName);
 		try {
-			return libraryService.getLibInfo(userInfo.getName(), exploratoryName);
+			return libraryService.getLibInfo(userInfo.getName(), projectName, exploratoryName);
 		} catch (Exception t) {
 			log.error("Cannot load list of libraries for user {} and exploratory {}", userInfo.getName(),
 					exploratoryName, t);
@@ -175,15 +182,16 @@
 	public Response libInstall(@Auth UserInfo userInfo,
 							   @Valid @NotNull LibInstallFormDTO formDTO) {
 		log.debug("Installing libs to environment {} for user {}", formDTO, userInfo.getName());
+		String project = formDTO.getProject();
 		final String exploratoryName = formDTO.getNotebookName();
 		final List<LibInstallDTO> libs = formDTO.getLibs();
 		final String computationalName = formDTO.getComputationalName();
 		String uuid = StringUtils.isEmpty(computationalName) ?
-				libraryService.installExploratoryLibs(userInfo, exploratoryName, libs) :
-				libraryService.installComputationalLibs(userInfo, exploratoryName, computationalName, libs);
+				libraryService.installExploratoryLibs(userInfo, project, exploratoryName, libs) :
+				libraryService.installComputationalLibs(userInfo, project, exploratoryName, computationalName, libs);
 		return Response.ok(uuid)
 				.build();
 	}
 
 	/**
 	 * Returns the list of available libraries for exploratory based on the search conditions provided in @formDTO.
@@ -203,7 +211,7 @@
 
 			if (StringUtils.isNotEmpty(formDTO.getComputationalName())) {
 
-				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getProjectName(),
 						formDTO.getNotebookName(), formDTO.getComputationalName());
 
 				userInstance.setResources(userInstance.getResources().stream()
@@ -211,7 +219,8 @@
 						.collect(Collectors.toList()));
 
 			} else {
-				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getNotebookName());
+				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getProjectName(),
+						formDTO.getNotebookName());
 			}
 
 			return ExploratoryLibCache.getCache().getLibList(userInfo, userInstance, formDTO.getGroup(), formDTO
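
With these changes every library lookup is scoped by project in addition to exploratory. A hedged JAX-RS client sketch of the updated lib_groups call follows; the host, port, base path, and the omitted authentication header are assumptions for illustration, not part of this patch.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;

// Calls the lib_groups endpoint with the project_name parameter that this
// patch makes mandatory. The target URI is an assumed example.
public class LibGroupsClientSketch {
	public static void main(String[] args) {
		Client client = ClientBuilder.newClient();
		String groups = client
				.target("https://localhost:8443/api/infrastructure_provision/exploratory_environment")
				.path("lib_groups")
				.queryParam("project_name", "myProject")        // newly required
				.queryParam("exploratory_name", "myNotebook")
				.queryParam("computational_name", "myCluster")  // still optional
				.request()
				.get(String.class);
		System.out.println(groups);
		client.close();
	}
}
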
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
index 8f187b8..6af8729 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
@@ -20,7 +20,11 @@
 package com.epam.dlab.backendapi.resources;
 
 import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.domain.*;
+import com.epam.dlab.backendapi.domain.CreateProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
+import com.epam.dlab.backendapi.domain.UpdateProjectBudgetDTO;
+import com.epam.dlab.backendapi.domain.UpdateProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.ProjectActionFormDTO;
 import com.epam.dlab.backendapi.service.AccessKeyService;
 import com.epam.dlab.backendapi.service.ProjectService;
@@ -38,7 +42,16 @@
 
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
-import javax.ws.rs.*;
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
@@ -76,14 +89,13 @@
 	})
 	@POST
 	@Consumes(MediaType.APPLICATION_JSON)
-	@RolesAllowed("/api/project")
+	@RolesAllowed("/api/project/create")
 	public Response createProject(@Parameter(hidden = true) @Auth UserInfo userInfo,
 								  @Valid CreateProjectDTO projectDTO) {
-
 		projectService.create(userInfo, new ProjectDTO(projectDTO.getName(), projectDTO.getGroups(),
 				projectDTO.getKey(), projectDTO.getTag(), null,
 				projectDTO.getEndpoints().stream().map(e -> new ProjectEndpointDTO(e, UserInstanceStatus.CREATING,
-						null)).collect(Collectors.toList())));
+						null)).collect(Collectors.toList()), projectDTO.isSharedImageEnabled()));
 		final URI uri = uriInfo.getRequestUriBuilder().path(projectDTO.getName()).build();
 		return Response
 				.ok()
@@ -103,8 +115,8 @@
 	@Consumes(MediaType.APPLICATION_JSON)
 	@RolesAllowed("/api/project")
 	public Response startProject(@Parameter(hidden = true) @Auth UserInfo userInfo,
-								 @Valid ProjectActionFormDTO startProjectDto) {
-		projectService.start(userInfo, startProjectDto.getEndpoint(), startProjectDto.getProjectName());
+								 @NotNull @Valid ProjectActionFormDTO startProjectDto) {
+		projectService.start(userInfo, startProjectDto.getEndpoints(), startProjectDto.getProjectName());
 		return Response
 				.accepted()
 				.build();
@@ -122,34 +134,13 @@
 	@Consumes(MediaType.APPLICATION_JSON)
 	@RolesAllowed("/api/project")
 	public Response stopProject(@Parameter(hidden = true) @Auth UserInfo userInfo,
-								@Valid ProjectActionFormDTO stopProjectDTO) {
-		projectService.stop(userInfo, stopProjectDTO.getEndpoint(), stopProjectDTO.getProjectName());
+								@NotNull @Valid ProjectActionFormDTO stopProjectDTO) {
+		projectService.stopWithResources(userInfo, stopProjectDTO.getEndpoints(), stopProjectDTO.getProjectName());
 		return Response
 				.accepted()
 				.build();
 	}
 
-	@Operation(summary = "Stop project on Manage environment popup", tags = "project")
-	@ApiResponses({
-			@ApiResponse(responseCode = "202", description = "Project is stopping"),
-			@ApiResponse(responseCode = "400", description = "Validation error", content = @Content(mediaType =
-					MediaType.APPLICATION_JSON,
-					schema = @Schema(implementation = ErrorDTO.class)))
-	})
-	@Path("managing/stop/{name}")
-	@POST
-	@Consumes(MediaType.APPLICATION_JSON)
-	@RolesAllowed("/api/project")
-	public Response stopProjectWithResources(@Parameter(hidden = true) @Auth UserInfo userInfo,
-											 @Parameter(description = "Project name")
-											 @PathParam("name") String name) {
-		projectService.stopWithResources(userInfo, name);
-		return Response
-				.accepted()
-				.build();
-	}
-
-
 	@Operation(summary = "Get project info", tags = "project")
 	@ApiResponses({
 			@ApiResponse(responseCode = "200", description = "Return information about project",
@@ -180,27 +171,9 @@
 	@GET
 	@Produces(MediaType.APPLICATION_JSON)
 	@RolesAllowed("/api/project")
-	public Response getProjects(@Parameter(hidden = true) @Auth UserInfo userInfo,
-								@Parameter(description = "Project name")
-								@PathParam("name") String name) {
+	public Response getProjects(@Parameter(hidden = true) @Auth UserInfo userInfo) {
 		return Response
-				.ok(projectService.getProjects())
-				.build();
-	}
-
-	@Operation(summary = "Get available projects for managing", tags = "project")
-	@ApiResponses({
-			@ApiResponse(responseCode = "200", description = "Return information about projects",
-					content = @Content(mediaType = MediaType.APPLICATION_JSON, schema =
-					@Schema(implementation = ProjectManagingDTO.class))),
-	})
-	@GET
-	@Path("managing")
-	@Produces(MediaType.APPLICATION_JSON)
-	@RolesAllowed("/api/project")
-	public Response getProjectsForManaging(@Parameter(hidden = true) @Auth UserInfo userInfo) {
-		return Response
-				.ok(projectService.getProjectsForManaging())
+				.ok(projectService.getProjects(userInfo))
 				.build();
 	}
 
@@ -233,7 +206,7 @@
 	@PUT
 	@RolesAllowed("/api/project")
 	public Response updateProject(@Parameter(hidden = true) @Auth UserInfo userInfo, UpdateProjectDTO projectDTO) {
-		projectService.update(userInfo, projectDTO);
+		projectService.update(userInfo, projectDTO, projectDTO.getName());
 		return Response.ok().build();
 	}
 
@@ -247,20 +220,9 @@
 	@POST
 	@Path("terminate")
 	@RolesAllowed("/api/project")
-	public Response removeProjectEndpoint(
-			@Parameter(hidden = true) @Auth UserInfo userInfo,
-			ProjectActionFormDTO projectActionDTO) {
-		projectService.terminateEndpoint(userInfo, projectActionDTO.getEndpoint(), projectActionDTO.getProjectName());
-		return Response.ok().build();
-	}
-
-	@DELETE
-	@Path("{name}")
-	@RolesAllowed("/api/project")
-	public Response removeProject(
-			@Parameter(hidden = true) @Auth UserInfo userInfo,
-			@PathParam("name") String name) {
-		projectService.terminateProject(userInfo, name);
+	public Response removeProjectEndpoint(@Parameter(hidden = true) @Auth UserInfo userInfo,
+										  @NotNull @Valid ProjectActionFormDTO projectActionDTO) {
+		projectService.terminateEndpoint(userInfo, projectActionDTO.getEndpoints(), projectActionDTO.getProjectName());
 		return Response.ok().build();
 	}
 
@@ -281,7 +243,7 @@
 					List<UpdateProjectBudgetDTO> dtos) {
 		final List<ProjectDTO> projects = dtos
 				.stream()
-				.map(dto -> new ProjectDTO(dto.getProject(), null, null, null, dto.getBudget(), null))
+				.map(dto -> ProjectDTO.builder().name(dto.getProject()).budget(dto.getBudget()).build())
 				.collect(Collectors.toList());
 		projectService.updateBudget(projects);
 		return Response.ok().build();
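
The last hunk replaces the positional ProjectDTO constructor, which needed four explicit nulls, with a builder that names only the two fields the budget update touches. A minimal sketch of the pattern, assuming Lombok (which the codebase already uses) and a reduced stand-in DTO:

import lombok.Builder;
import lombok.Value;

// Reduced stand-in for ProjectDTO: only the fields touched by
// updateBudget are kept, so unset fields simply never appear.
public class ProjectBuilderSketch {

	@Value
	@Builder
	static class ProjectDTO {
		String name;
		Integer budget;
	}

	public static void main(String[] args) {
		ProjectDTO dto = ProjectDTO.builder()
				.name("myProject")
				.budget(100)
				.build();
		System.out.println(dto); // ProjectDTO(name=myProject, budget=100)
	}
}
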
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
index c4f9ee4..dd8f82f 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
@@ -28,7 +28,14 @@
 import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
 
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
@@ -56,12 +63,13 @@
 	 * @return response
 	 */
 	@POST
-	@Path("/{exploratoryName}")
+	@Path("/{projectName}/{exploratoryName}")
 	@Consumes(MediaType.APPLICATION_JSON)
 	public Response updateExploratoryScheduler(@Auth UserInfo userInfo,
+											   @PathParam("projectName") String projectName,
 											   @PathParam("exploratoryName") String exploratoryName,
 											   @SchedulerJobDTOValid SchedulerJobDTO dto) {
-		schedulerJobService.updateExploratorySchedulerData(userInfo.getName(), exploratoryName, dto);
+		schedulerJobService.updateExploratorySchedulerData(userInfo.getName(), projectName, exploratoryName, dto);
 		return Response.ok().build();
 	}
 
@@ -92,16 +100,17 @@
 	 * @return response
 	 */
 	@POST
-	@Path("/{exploratoryName}/{computationalName}")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}")
 	@Consumes(MediaType.APPLICATION_JSON)
 	public Response updateComputationalScheduler(@Auth UserInfo userInfo,
+												 @PathParam("projectName") String projectName,
 												 @PathParam("exploratoryName") String exploratoryName,
 												 @PathParam("computationalName") String computationalName,
 												 @SchedulerJobDTOValid SchedulerJobDTO dto) {
-		schedulerJobService.updateComputationalSchedulerData(userInfo.getName(), exploratoryName,
+		schedulerJobService.updateComputationalSchedulerData(userInfo.getName(), projectName, exploratoryName,
 				computationalName, dto);
 		return Response.ok().build();
 	}
 
 	/**
 	 * Updates computational resource <code>computationalName</code> affiliated with exploratory
@@ -132,13 +141,14 @@
 	 * @return scheduler job data
 	 */
 	@GET
-	@Path("/{exploratoryName}")
+	@Path("/{projectName}/{exploratoryName}")
 	@Produces(MediaType.APPLICATION_JSON)
 	public Response fetchSchedulerJobForUserAndExploratory(@Auth UserInfo userInfo,
+														   @PathParam("projectName") String projectName,
 														   @PathParam("exploratoryName") String exploratoryName) {
 		log.debug("Loading scheduler job for user {} and exploratory {}...", userInfo.getName(), exploratoryName);
 		final SchedulerJobDTO schedulerJob =
-				schedulerJobService.fetchSchedulerJobForUserAndExploratory(userInfo.getName(), exploratoryName);
+				schedulerJobService.fetchSchedulerJobForUserAndExploratory(userInfo.getName(), projectName, exploratoryName);
 		return Response.ok(schedulerJob).build();
 	}
 
@@ -152,15 +162,16 @@
 	 * @return scheduler job data
 	 */
 	@GET
-	@Path("/{exploratoryName}/{computationalName}")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}")
 	@Produces(MediaType.APPLICATION_JSON)
 	public Response fetchSchedulerJobForComputationalResource(@Auth UserInfo userInfo,
 															  @PathParam("exploratoryName") String exploratoryName,
+															  @PathParam("projectName") String projectName,
 															  @PathParam("computationalName") String computationalName) {
 		log.debug("Loading scheduler job for user {}, exploratory {} and computational resource {}...",
 				userInfo.getName(), exploratoryName, computationalName);
 		final SchedulerJobDTO schedulerJob = schedulerJobService
-				.fetchSchedulerJobForComputationalResource(userInfo.getName(), exploratoryName, computationalName);
+				.fetchSchedulerJobForComputationalResource(userInfo.getName(), projectName, exploratoryName, computationalName);
 		return Response.ok(schedulerJob).build();
 	}
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SecurityResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SecurityResource.java
deleted file mode 100644
index 031ffa7..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SecurityResource.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.auth.contract.SecurityAPI;
-import com.epam.dlab.auth.dto.UserCredentialDTO;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.SecurityDAO;
-import com.epam.dlab.backendapi.domain.EnvStatusListener;
-import com.epam.dlab.backendapi.roles.UserRoles;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
-import com.epam.dlab.rest.dto.ErrorDTO;
-import com.epam.dlab.validation.AwsValidation;
-import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
-import lombok.extern.slf4j.Slf4j;
-import org.hibernate.validator.constraints.NotBlank;
-
-import javax.validation.Valid;
-import javax.validation.constraints.NotNull;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-
-/**
- * Provides the REST API for the user authorization.
- */
-@Path("/user")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class SecurityResource implements SecurityAPI {
-
-	private SecurityDAO dao;
-	private RESTService securityService;
-	private EnvStatusListener envStatusListener;
-	private SelfServiceApplicationConfiguration configuration;
-
-	@Inject
-	public SecurityResource(SecurityDAO dao,
-							EnvStatusListener envStatusListener, SelfServiceApplicationConfiguration configuration) {
-		this.dao = dao;
-		this.securityService = null;
-		this.envStatusListener = envStatusListener;
-		this.configuration = configuration;
-	}
-
-	/**
-	 * Login method for the DLab user.
-	 *
-	 * @param credential user credential.
-	 * @return 500 Internal Server Error if the post request fails.
-	 */
-	@POST
-	@Produces(MediaType.TEXT_PLAIN)
-	@Path("/login")
-	public Response userLogin(@Valid @NotNull UserCredentialDTO credential) {
-		log.debug("Try login for user {}", credential.getUsername());
-		try {
-			dao.writeLoginAttempt(credential);
-			return securityService.post(LOGIN, credential, Response.class);
-		} catch (Exception e) {
-			log.error("Try login for user {} failed", credential.getUsername(), e);
-			final Status internalServerError = Status.INTERNAL_SERVER_ERROR;
-			return Response.status(internalServerError)
-					.entity(new ErrorDTO(internalServerError.getStatusCode(), e.getMessage()))
-					.type(MediaType.APPLICATION_JSON)
-					.build();
-		}
-	}
-
-
-	/**
-	 * Authorize method for the DLab user.
-	 *
-	 * @param userInfo user info.
-	 * @param username user name.
-	 * @return 500 Internal Server Error if the post request fails.
-	 */
-	@POST
-	@Consumes(MediaType.TEXT_PLAIN)
-	@Path("/authorize")
-	public Response authorize(@Auth UserInfo userInfo,
-							  @Valid @NotBlank(groups = AwsValidation.class) String username) {
-		log.debug("Try authorize accessToken {} for user info {}", userInfo.getAccessToken(), userInfo);
-		try {
-			Status status = userInfo.getName().equalsIgnoreCase(username) ?
-					Status.OK :
-					Status.FORBIDDEN;
-			if (status == Status.OK) {
-				envStatusListener.registerSession(userInfo);
-				if (configuration.isRolePolicyEnabled()) {
-					UserRoles.initialize(dao, configuration.getRoleDefaultAccess());
-				}
-			}
-			return Response.status(status).build();
-		} catch (Exception e) {
-			throw new DlabException("Cannot authorize user " + username + ". " + e.getLocalizedMessage(), e);
-		}
-	}
-
-	/**
-	 * Logout method for the DLab user.
-	 *
-	 * @param userInfo user info.
-	 * @return 200 OK or 403 Forbidden.
-	 */
-	@POST
-	@Path("/logout")
-	public Response userLogout(@Auth UserInfo userInfo) {
-		log.debug("Try logout for accessToken {}", userInfo.getAccessToken());
-		try {
-			envStatusListener.unregisterSession(userInfo);
-			return securityService.post(LOGOUT, userInfo.getAccessToken(), Response.class);
-		} catch (Exception e) {
-			log.error("Try logout for accessToken {}", userInfo.getAccessToken(), e);
-			final Status internalServerError = Status.INTERNAL_SERVER_ERROR;
-			return Response.status(internalServerError)
-					.entity(new ErrorDTO(internalServerError.getStatusCode(), e.getMessage()))
-					.type(MediaType.APPLICATION_JSON)
-					.build();
-		}
-	}
-}
\ No newline at end of file
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java
index 154ddc2..8cd3381 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java
@@ -20,24 +20,26 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.GroupDTO;
-import com.epam.dlab.backendapi.resources.dto.UpdateRoleGroupDto;
-import com.epam.dlab.backendapi.resources.dto.UpdateUserGroupDto;
 import com.epam.dlab.backendapi.service.UserGroupService;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
-import org.hibernate.validator.constraints.NotEmpty;
 
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import java.util.Set;
 
 @Slf4j
 @Path("group")
-@RolesAllowed("/roleManagement")
 @Consumes(MediaType.APPLICATION_JSON)
 @Produces(MediaType.APPLICATION_JSON)
 public class UserGroupResource {
@@ -51,71 +53,34 @@
 
 
 	@POST
-	public Response createGroup(@Auth UserInfo userInfo,
-								@Valid GroupDTO dto) {
+	@RolesAllowed("/roleManagement/create")
+	public Response createGroup(@Auth UserInfo userInfo, @Valid GroupDTO dto) {
 		log.debug("Creating new group {}", dto.getName());
 		userGroupService.createGroup(dto.getName(), dto.getRoleIds(), dto.getUsers());
 		return Response.ok().build();
 	}
 
 	@PUT
+	@RolesAllowed("/roleManagement")
 	public Response updateGroup(@Auth UserInfo userInfo, @Valid GroupDTO dto) {
 		log.debug("Updating group {}", dto.getName());
-		userGroupService.updateGroup(dto.getName(), dto.getRoleIds(), dto.getUsers());
+		userGroupService.updateGroup(userInfo, dto.getName(), dto.getRoleIds(), dto.getUsers());
 		return Response.ok().build();
 	}
 
 	@GET
+	@RolesAllowed("/roleManagement")
 	public Response getGroups(@Auth UserInfo userInfo) {
 		log.debug("Getting all groups for admin {}...", userInfo.getName());
-		return Response.ok(userGroupService.getAggregatedRolesByGroup()).build();
-	}
-
-	@PUT
-	@Path("role")
-	public Response updateRolesForGroup(@Auth UserInfo userInfo, @Valid UpdateRoleGroupDto updateRoleGroupDto) {
-		log.info("Admin {} is trying to add new group {} to roles {}", userInfo.getName(),
-				updateRoleGroupDto.getGroup(), updateRoleGroupDto.getRoleIds());
-		userGroupService.updateRolesForGroup(updateRoleGroupDto.getGroup(), updateRoleGroupDto.getRoleIds());
-		return Response.ok().build();
-	}
-
-	@DELETE
-	@Path("role")
-	public Response deleteGroupFromRole(@Auth UserInfo userInfo,
-										@QueryParam("group") @NotEmpty Set<String> groups,
-										@QueryParam("roleId") @NotEmpty Set<String> roleIds) {
-		log.info("Admin {} is trying to delete groups {} from roles {}", userInfo.getName(), groups, roleIds);
-		userGroupService.removeGroupFromRole(groups, roleIds);
-		return Response.ok().build();
+		return Response.ok(userGroupService.getAggregatedRolesByGroup(userInfo)).build();
 	}
 
 	@DELETE
 	@Path("{id}")
-	public Response deleteGroup(@Auth UserInfo userInfo,
-								@PathParam("id") String group) {
+	@RolesAllowed("/roleManagement/delete")
+	public Response deleteGroup(@Auth UserInfo userInfo, @PathParam("id") String group) {
 		log.info("Admin {} is trying to delete group {} from application", userInfo.getName(), group);
 		userGroupService.removeGroup(group);
 		return Response.ok().build();
 	}
-
-	@PUT
-	@Path("user")
-	public Response addUserToGroup(@Auth UserInfo userInfo,
-								   @Valid UpdateUserGroupDto updateUserGroupDto) {
-		log.info("Admin {} is trying to add new users {} to group {}", userInfo.getName(),
-				updateUserGroupDto.getUsers(), updateUserGroupDto.getGroup());
-		userGroupService.addUsersToGroup(updateUserGroupDto.getGroup(), updateUserGroupDto.getUsers());
-		return Response.ok().build();
-	}
-
-	@DELETE
-	@Path("user")
-	public Response deleteUserFromGroup(@Auth UserInfo userInfo,
-										@QueryParam("user") @NotEmpty String user,
-										@QueryParam("group") @NotEmpty String group) {
-		log.info("Admin {} is trying to delete user {} from group {}", userInfo.getName(), user, group);
-		userGroupService.removeUserFromGroup(group, user);
-		return Response.ok().build();
-	}
 }
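
The single class-level role on UserGroupResource is split here into per-method roles, so creating and deleting groups can be granted independently of read access. A reduced sketch of that layout; role names follow the patch, while the class and handler bodies are stand-ins:

import javax.annotation.security.RolesAllowed;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;

// Each operation now declares its own role instead of inheriting one
// class-wide role, enabling finer-grained group administration.
@Path("group")
public class GroupRolesSketch {

	@POST
	@RolesAllowed("/roleManagement/create")
	public String createGroup() {
		return "created";
	}

	@GET
	@RolesAllowed("/roleManagement")
	public String getGroups() {
		return "groups";
	}

	@DELETE
	@Path("{id}")
	@RolesAllowed("/roleManagement/delete")
	public String deleteGroup(@PathParam("id") String group) {
		return "deleted " + group;
	}
}
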
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java
index b9d0619..52ad739 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java
@@ -26,7 +26,11 @@
 import lombok.extern.slf4j.Slf4j;
 
 import javax.annotation.security.RolesAllowed;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/BillingResourceAws.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/BillingResourceAws.java
deleted file mode 100644
index 1d6535d..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/BillingResourceAws.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.aws;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.resources.dto.aws.AwsBillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
-import org.bson.Document;
-
-import javax.validation.Valid;
-import javax.validation.constraints.NotNull;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-/**
- * Provides API to get or export billing information.
- */
-@Path("/billing")
-@Consumes(MediaType.APPLICATION_JSON)
-public class BillingResourceAws {
-
-	@Inject
-	private BillingService billingService;
-
-	/**
-	 * Returns the billing report.
-	 *
-	 * @param userInfo user info.
-	 * @param formDTO  filter for report data.
-	 */
-	@POST
-	@Path("/report")
-	@Produces(MediaType.APPLICATION_JSON)
-	@SuppressWarnings("unchecked")
-	public Document getBillingReport(@Auth UserInfo userInfo, @Valid @NotNull AwsBillingFilter formDTO) {
-		return billingService.getBillingReport(userInfo, formDTO);
-	}
-
-	/**
-	 * Returns the billing report as a CSV file.
-	 *
-	 * @param userInfo user info.
-	 * @param formDTO  filter for report data.
-	 */
-
-	@POST
-	@Path("/report/download")
-	@Produces(MediaType.APPLICATION_OCTET_STREAM)
-	@SuppressWarnings("unchecked")
-	public Response downloadBillingReport(@Auth UserInfo userInfo, @Valid @NotNull AwsBillingFilter formDTO) {
-		return Response.ok(billingService.downloadReport(userInfo, formDTO))
-				.header(HttpHeaders.CONTENT_DISPOSITION,
-						"attachment; filename=\"" + billingService.getReportFileName(userInfo, formDTO) + "\"")
-				.build();
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
index b5551be..87f99bd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
@@ -34,13 +34,18 @@
 import com.epam.dlab.rest.contracts.ComputationalAPI;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
-import io.swagger.v3.oas.annotations.Operation;
 import io.swagger.v3.oas.annotations.Parameter;
 import lombok.extern.slf4j.Slf4j;
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -52,17 +57,22 @@
 /**
  * Provides the REST API for the computational resource on AWS.
  */
-@Path("/infrastructure_provision/computational_resources")
+@Path("/aws/infrastructure_provision/computational_resources")
 @Consumes(MediaType.APPLICATION_JSON)
 @Produces(MediaType.APPLICATION_JSON)
 @Slf4j
 public class ComputationalResourceAws implements ComputationalAPI {
-
 	@Inject
 	private SelfServiceApplicationConfiguration configuration;
 	@Inject
 	private ComputationalService computationalService;
 
+	@GET
+	@Path("/{project}/{endpoint}/templates")
+	public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+								 @PathParam("endpoint") String endpoint) {
+		return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+	}
 
 	/**
 	 * Asynchronously creates EMR cluster
@@ -133,13 +143,14 @@
 	 * @return 200 OK if operation is successfully triggered
 	 */
 	@DELETE
-	@Path("/{exploratoryName}/{computationalName}/terminate")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
 	public Response terminate(@Auth UserInfo userInfo,
+							  @PathParam("projectName") String projectName,
 							  @PathParam("exploratoryName") String exploratoryName,
 							  @PathParam("computationalName") String computationalName) {
 		log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+		computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -155,14 +166,15 @@
 	@DELETE
 	@Path("/{project}/{exploratoryName}/{computationalName}/stop")
 	public Response stop(@Auth UserInfo userInfo,
+						 @PathParam("project") String project,
 						 @PathParam("exploratoryName") String exploratoryName,
 						 @PathParam("computationalName") String computationalName) {
 		log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+		computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
 
 	/**
 	 * Sends request to provisioning service for starting the computational resource for user.
@@ -186,22 +198,24 @@
 	}
 
 	@PUT
-	@Path("dataengine/{exploratoryName}/{computationalName}/config")
+	@Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+										   @PathParam("projectName") String projectName,
 										   @PathParam("exploratoryName") String exploratoryName,
 										   @PathParam("computationalName") String computationalName,
 										   @Valid @NotNull List<ClusterConfig> config) {
 
-		computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+		computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("{exploratoryName}/{computationalName}/config")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
-		return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+		return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
 	}
 
 	private void validate(SparkStandaloneClusterCreateForm form) {
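
The AWS resource is now mounted under a cloud-specific prefix, so dispatch between clouds happens in the JAX-RS router rather than in runtime configuration, and the new templates lookup is addressed per project and endpoint. A reduced resource-side sketch of that layout, with a stand-in handler body:

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;

// Sketch of the per-cloud mounting introduced here: the same logical
// resource exists once per cloud under its own path prefix.
@Path("/aws/infrastructure_provision/computational_resources")
public class AwsComputationalSketch {

	@GET
	@Path("/{project}/{endpoint}/templates")
	public String getTemplates(@PathParam("project") String project,
							   @PathParam("endpoint") String endpoint) {
		return "templates for " + project + "/" + endpoint; // stand-in payload
	}
}
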
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
index 6b56386..29f9794 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
@@ -21,26 +21,27 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.auth.rest.UserSessionDurationAuthorizer;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.ComputationalDAO;
-import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
 import com.epam.dlab.backendapi.roles.RoleType;
 import com.epam.dlab.backendapi.roles.UserRoles;
 import com.epam.dlab.backendapi.service.ComputationalService;
-import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
 import com.google.inject.Inject;
-import com.google.inject.name.Named;
 import io.dropwizard.auth.Auth;
+import io.swagger.v3.oas.annotations.Parameter;
 import lombok.extern.slf4j.Slf4j;
 
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -48,27 +49,24 @@
 /**
  * Provides the REST API for the computational resource on Azure.
  */
-@Path("/infrastructure_provision/computational_resources")
+@Path("/azure/infrastructure_provision/computational_resources")
 @Consumes(MediaType.APPLICATION_JSON)
 @Produces(MediaType.APPLICATION_JSON)
 @Slf4j
 public class ComputationalResourceAzure {
+	private final ComputationalService computationalService;
 
 	@Inject
-	private ExploratoryDAO exploratoryDAO;
+	public ComputationalResourceAzure(ComputationalService computationalService) {
+		this.computationalService = computationalService;
+	}
 
-	@Inject
-	private ComputationalDAO computationalDAO;
-
-	@Inject
-	@Named(ServiceConsts.PROVISIONING_SERVICE_NAME)
-	private RESTService provisioningService;
-
-	@Inject
-	private SelfServiceApplicationConfiguration configuration;
-
-	@Inject
-	private ComputationalService computationalService;
+	@GET
+	@Path("/{project}/{endpoint}/templates")
+	public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+								 @PathParam("endpoint") String endpoint) {
+		return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+	}
 
 	/**
 	 * Asynchronously creates computational Spark cluster.
@@ -105,14 +103,15 @@
 	 * @return 200 OK if operation is successfully triggered
 	 */
 	@DELETE
-	@Path("/{exploratoryName}/{computationalName}/terminate")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
 	public Response terminate(@Auth UserInfo userInfo,
+							  @PathParam("projectName") String projectName,
 							  @PathParam("exploratoryName") String exploratoryName,
 							  @PathParam("computationalName") String computationalName) {
 
 		log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+		computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -127,12 +126,13 @@
 	 */
 	@DELETE
 	@Path("/{project}/{exploratoryName}/{computationalName}/stop")
-	public Response stop( @Auth UserInfo userInfo,
+	public Response stop(@Auth UserInfo userInfo,
+						 @PathParam("project") String project,
 						 @PathParam("exploratoryName") String exploratoryName,
 						 @PathParam("computationalName") String computationalName) {
 		log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+		computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -159,21 +159,23 @@
 	}
 
 	@PUT
-	@Path("dataengine/{exploratoryName}/{computationalName}/config")
+	@Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+										   @PathParam("projectName") String projectName,
 										   @PathParam("exploratoryName") String exploratoryName,
 										   @PathParam("computationalName") String computationalName,
 										   @Valid @NotNull List<ClusterConfig> config) {
 
-		computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+		computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("{exploratoryName}/{computationalName}/config")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
-		return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+		return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
 	}
 }
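
Alongside the path change, the Azure resource swaps five injected fields for a single constructor-injected dependency, which makes its one real collaborator explicit and lets tests construct it directly. A self-contained sketch of the pattern with stand-in types:

import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;

// Stand-in types showing the constructor injection used by the rewritten
// ComputationalResourceAzure; Guice supplies the constructor argument.
public class ConstructorInjectionSketch {

	static class ComputationalService {
		String templates(String project) {
			return "templates for " + project;
		}
	}

	static class Resource {
		private final ComputationalService computationalService;

		@Inject
		Resource(ComputationalService computationalService) {
			this.computationalService = computationalService;
		}

		String getTemplates(String project) {
			return computationalService.templates(project);
		}
	}

	public static void main(String[] args) {
		Injector injector = Guice.createInjector();
		System.out.println(injector.getInstance(Resource.class).getTemplates("myProject"));
		// In a unit test the dependency can simply be handed in:
		System.out.println(new Resource(new ComputationalService()).getTemplates("test"));
	}
}
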
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
index 111bcfa..abf4c6d 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
@@ -58,9 +58,8 @@
 	@Path("computational")
 	public Response updateComputationalLastActivity(CheckInactivityStatusDTO dto) {
 		requestId.checkAndRemove(dto.getRequestId());
-		inactivityService.updateLastActivityForComputational(new UserInfo(dto.getUser(), null),
-				dto.getExploratoryName(),
-				dto.getComputationalName(), toLocalDateTime(dto.getLastActivityUnixTime()));
+		inactivityService.updateLastActivityForComputational(new UserInfo(dto.getUser(), null), null,
+				dto.getExploratoryName(), dto.getComputationalName(), toLocalDateTime(dto.getLastActivityUnixTime()));
 		return Response.ok().build();
 	}
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
index b2da20c..2b286b5 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
@@ -19,7 +19,6 @@
 
 package com.epam.dlab.backendapi.resources.callback;
 
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.ComputationalService;
@@ -29,7 +28,6 @@
 import com.epam.dlab.dto.computational.ComputationalStatusDTO;
 import com.epam.dlab.dto.computational.UserComputationalResource;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.rest.contracts.ApiCallbacks;
 import com.google.inject.Inject;
 import lombok.extern.slf4j.Slf4j;
@@ -42,8 +40,6 @@
 import javax.ws.rs.core.Response;
 import java.util.Date;
 
-import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-
 @Path("/infrastructure_provision/computational_resources")
 @Consumes(MediaType.APPLICATION_JSON)
 @Produces(MediaType.APPLICATION_JSON)
@@ -75,11 +71,12 @@
 		String uuid = dto.getRequestId();
 		requestId.checkAndRemove(uuid);
 
-		UserComputationalResource compResource = computationalService.getComputationalResource(dto.getUser(),
-				dto.getExploratoryName(), dto.getComputationalName()).orElseThrow(() ->
-				new DlabException("Computational resource " + dto.getComputationalName() +
-						" of exploratory environment " + dto.getExploratoryName() + " for user " + dto.getUser() +
-						" doesn't exist"));
+		UserComputationalResource compResource = computationalService.getComputationalResource(dto.getUser(), dto.getProject(),
+				dto.getExploratoryName(), dto.getComputationalName())
+				.orElseThrow(() ->
+						new DlabException(String.format("Computational resource %s of exploratory environment %s of " +
+										"project %s for user %s doesn't exist", dto.getComputationalName(),
+								dto.getExploratoryName(), dto.getProject(), dto.getUser())));
 		log.debug("Current status for computational resource {} of exploratory environment {} for user {} is {}",
 				dto.getComputationalName(), dto.getExploratoryName(), dto.getUser(),
 				compResource.getStatus());
@@ -93,11 +90,6 @@
 			log.debug("Waiting for configuration of the computational resource {} for user {}",
 					dto.getComputationalName(), dto.getUser());
 			requestId.put(dto.getUser(), uuid);
-		} else if (UserInstanceStatus.of(dto.getStatus()) == RUNNING && compResource.isReuploadKeyRequired()) {
-			ResourceData resourceData = ResourceData.computationalResource(compResource.getComputationalId(),
-					dto.getExploratoryName(), dto.getComputationalName());
-			UserInfo userInfo = securityService.getUserInfoOffline(dto.getUser());
-			reuploadKeyService.reuploadKeyAction(userInfo, resourceData);
 		}
 		return Response.ok().build();
 	}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
index 618fb04..d4c059e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
@@ -20,14 +20,12 @@
 package com.epam.dlab.backendapi.resources.callback;
 
 import com.epam.dlab.backendapi.dao.EnvDAO;
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.status.EnvStatusDTO;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.rest.contracts.ApiCallbacks;
 import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
 
 import javax.ws.rs.Consumes;
@@ -63,7 +61,7 @@
             if (UserInstanceStatus.FAILED == UserInstanceStatus.of(dto.getStatus())) {
                 log.warn("Request for the status of resources for user {} fails: {}", dto.getUser(), dto.getErrorMessage());
             } else {
-                envDAO.updateEnvStatus(dto.getUser(), dto.getResourceList());
+                envDAO.updateEnvStatus(dto.getUser(), null, dto.getResourceList());
             }
         } catch (DlabException e) {
             log.warn("Could not update status of resources for user {}: {}", dto.getUser(), e.getLocalizedMessage(), e);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
index 62f6435..c275a18 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
@@ -19,7 +19,6 @@
 
 package com.epam.dlab.backendapi.resources.callback;
 
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
@@ -30,7 +29,6 @@
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.exploratory.ExploratoryStatusDTO;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.rest.contracts.ApiCallbacks;
 import com.google.inject.Inject;
 import lombok.extern.slf4j.Slf4j;
@@ -43,7 +41,11 @@
 import javax.ws.rs.core.Response;
 import java.util.Date;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
 
 
 @Path("/infrastructure_provision/exploratory_environment")
@@ -80,7 +82,7 @@
 				dto.getExploratoryName(), dto.getUser(), dto.getStatus());
 		requestId.checkAndRemove(dto.getRequestId());
 
-		UserInstanceDTO instance = exploratoryService.getUserInstance(dto.getUser(), dto.getExploratoryName())
+		UserInstanceDTO instance = exploratoryService.getUserInstance(dto.getUser(), dto.getProject(), dto.getExploratoryName())
 				.orElseThrow(() -> new DlabException(String.format(USER_INSTANCE_NOT_EXIST_MSG,
 						dto.getExploratoryName(), dto.getUser())));
 
@@ -91,24 +93,19 @@
 		try {
 			exploratoryDAO.updateExploratoryFields(dto.withLastActivity(new Date()));
 			if (currentStatus == TERMINATING) {
-				updateComputationalStatuses(dto.getUser(), dto.getExploratoryName(),
+				updateComputationalStatuses(dto.getUser(), dto.getProject(), dto.getExploratoryName(),
 						UserInstanceStatus.of(dto.getStatus()));
 			} else if (currentStatus == STOPPING) {
-				updateComputationalStatuses(dto.getUser(), dto.getExploratoryName(),
+				updateComputationalStatuses(dto.getUser(), dto.getProject(), dto.getExploratoryName(),
 						UserInstanceStatus.of(dto.getStatus()), TERMINATED, FAILED, TERMINATED, STOPPED);
 			}
 		} catch (DlabException e) {
-			log.error("Could not update status for exploratory environment {} for user {} to {}",
-					dto.getExploratoryName(), dto.getUser(), dto.getStatus(), e);
+			log.error("Could not update status for exploratory environment {} in project {} for user {} to {}",
+					dto.getExploratoryName(), dto.getProject(), dto.getUser(), dto.getStatus(), e);
 			throw new DlabException("Could not update status for exploratory environment " + dto.getExploratoryName() +
 					" for user " + dto.getUser() + " to " + dto.getStatus() + ": " + e.getLocalizedMessage(), e);
 		}
-		if (UserInstanceStatus.of(dto.getStatus()) == RUNNING && instance.isReuploadKeyRequired()) {
-			ResourceData resourceData =
-					ResourceData.exploratoryResource(dto.getExploratoryId(), dto.getExploratoryName());
-			UserInfo userInfo = securityService.getUserInfoOffline(dto.getUser());
-			reuploadKeyService.reuploadKeyAction(userInfo, resourceData);
-		}
+
 		return Response.ok().build();
 	}
 
@@ -116,23 +113,25 @@
 	 * Updates the computational status of exploratory environment.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus status) {
+	private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus status) {
 		log.debug("updating status for all computational resources of {} for user {}: {}", exploratoryName, user,
 				status);
 		computationalDAO.updateComputationalStatusesForExploratory(new ExploratoryStatusDTO()
 				.withUser(user)
 				.withExploratoryName(exploratoryName)
+				.withProject(project)
 				.withStatus(status));
 	}
 
-	private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus
+	private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus
 			dataEngineStatus, UserInstanceStatus dataEngineServiceStatus, UserInstanceStatus... excludedStatuses) {
 		log.debug("updating status for all computational resources of {} for user {}: DataEngine {}, " +
 				"dataengine-service {}", exploratoryName, user, dataEngineStatus, dataEngineServiceStatus);
-		computationalDAO.updateComputationalStatusesForExploratory(user, exploratoryName, dataEngineStatus,
-				dataEngineServiceStatus, excludedStatuses);
+		computationalDAO.updateComputationalStatusesForExploratory(user, project, exploratoryName,
+				dataEngineStatus, dataEngineServiceStatus, excludedStatuses);
 	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ImageCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ImageCallback.java
index a3357f6..66b54b6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ImageCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ImageCallback.java
@@ -19,7 +19,6 @@
 
 package com.epam.dlab.backendapi.resources.callback;
 
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.ImageExploratoryService;
 import com.epam.dlab.dto.UserInstanceStatus;
@@ -27,7 +26,6 @@
 import com.epam.dlab.dto.exploratory.ImageStatus;
 import com.epam.dlab.model.exploratory.Image;
 import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
 
 import javax.ws.rs.Consumes;
@@ -45,11 +43,9 @@
 
 	@Inject
 	private ImageExploratoryService imageExploratoryService;
-
 	@Inject
 	private RequestId requestId;
 
-
 	@POST
 	@Path("/image_status")
 	public Response imageCreateStatus(ImageCreateStatusDTO dto) {
@@ -60,11 +56,12 @@
 		return Response.status(Response.Status.CREATED).build();
 	}
 
-
 	private Image getImage(ImageCreateStatusDTO dto) {
 		return Image.builder()
 				.name(dto.getName())
 				.user(dto.getUser())
+				.project(dto.getProject())
+				.endpoint(dto.getEndpoint())
 				.externalName(dto.getImageCreateDTO().getExternalName())
 				.fullName(dto.getImageCreateDTO().getFullName())
 				.status(UserInstanceStatus.FAILED == UserInstanceStatus.of(dto.getStatus()) ?
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/aws/EdgeCallbackAws.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/aws/EdgeCallbackAws.java
deleted file mode 100644
index 696e0ed..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/aws/EdgeCallbackAws.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.aws;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.callback.base.EdgeCallback;
-import com.epam.dlab.dto.aws.edge.EdgeInfoAws;
-import com.epam.dlab.dto.base.keyload.UploadFileResult;
-import com.epam.dlab.rest.contracts.ApiCallbacks;
-import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
-import lombok.extern.slf4j.Slf4j;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-@Path("/infrastructure/edge")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class EdgeCallbackAws extends EdgeCallback {
-
-	@Inject
-	private RequestId requestId;
-
-    public EdgeCallbackAws() {
-        log.info("{} is initialized", getClass().getSimpleName());
-    }
-
-    /**
-     * Stores the result of the upload the user key.
-     *
-     * @param dto result of the upload the user key.
-     * @return 200 OK
-     */
-    @POST
-    @Path(ApiCallbacks.STATUS_URI)
-    public Response status(@Auth UserInfo ui, UploadFileResult<EdgeInfoAws> dto) {
-		requestId.checkAndRemove(dto.getRequestId());
-        handleEdgeCallback(dto.getUser(), dto.getStatus());
-        return Response.ok().build();
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/aws/KeyUploaderCallbackAws.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/aws/KeyUploaderCallbackAws.java
deleted file mode 100644
index b3561ed..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/aws/KeyUploaderCallbackAws.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.aws;
-
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.callback.base.KeyUploaderCallback;
-import com.epam.dlab.dto.aws.edge.EdgeInfoAws;
-import com.epam.dlab.dto.base.keyload.UploadFileResult;
-import com.google.inject.Inject;
-import lombok.extern.slf4j.Slf4j;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-@Path("/user/access_key")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class KeyUploaderCallbackAws {
-	@Inject
-	private KeyUploaderCallback keyUploaderCallback;
-	@Inject
-	private RequestId requestId;
-
-	public KeyUploaderCallbackAws() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	/**
-	 * Stores the result of the upload the user key.
-	 *
-	 * @param dto result of the upload the user key.
-	 * @return 200 OK
-	 */
-	@POST
-	@Path("/callback")
-	public Response loadKeyResponse(UploadFileResult<EdgeInfoAws> dto) {
-		log.debug("Upload the key result and EDGE node info for user {}: {}", dto.getUser(), dto);
-		requestId.checkAndRemove(dto.getRequestId());
-		keyUploaderCallback.handleCallback(dto.getStatus(), dto.getUser(), dto.getEdgeInfo());
-
-		return Response.ok().build();
-
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/azure/EdgeCallbackAzure.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/azure/EdgeCallbackAzure.java
deleted file mode 100644
index 0fe97b3..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/azure/EdgeCallbackAzure.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.azure;
-
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.callback.base.EdgeCallback;
-import com.epam.dlab.dto.azure.edge.EdgeInfoAzure;
-import com.epam.dlab.dto.base.keyload.UploadFileResult;
-import com.epam.dlab.rest.contracts.ApiCallbacks;
-import com.google.inject.Inject;
-import lombok.extern.slf4j.Slf4j;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-@Path("/infrastructure/edge")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class EdgeCallbackAzure extends EdgeCallback {
-
-	@Inject
-	private RequestId requestId;
-
-	public EdgeCallbackAzure() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	/**
-	 * Stores the result of the upload the user key.
-	 *
-	 * @param dto result of the upload the user key.
-	 * @return 200 OK
-	 */
-	@POST
-	@Path(ApiCallbacks.STATUS_URI)
-	public Response status(UploadFileResult<EdgeInfoAzure> dto) {
-		requestId.checkAndRemove(dto.getRequestId());
-		handleEdgeCallback(dto.getUser(), dto.getStatus());
-		return Response.ok().build();
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/azure/KeyUploaderCallbackAzure.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/azure/KeyUploaderCallbackAzure.java
deleted file mode 100644
index b383ce0..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/azure/KeyUploaderCallbackAzure.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.azure;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.callback.base.KeyUploaderCallback;
-import com.epam.dlab.dto.azure.edge.EdgeInfoAzure;
-import com.epam.dlab.dto.base.keyload.UploadFileResult;
-import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
-import lombok.extern.slf4j.Slf4j;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-@Path("/user/access_key")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class KeyUploaderCallbackAzure {
-
-    @Inject
-    private KeyUploaderCallback keyUploaderCallback;
-
-	@Inject
-	private RequestId requestId;
-
-    public KeyUploaderCallbackAzure() {
-        log.info("{} is initialized", getClass().getSimpleName());
-    }
-
-    /**
-     * Stores the result of the upload the user key.
-     *
-     * @param dto result of the upload the user key.
-     * @return 200 OK
-     */
-    @POST
-    @Path("/callback")
-	public Response loadKeyResponse(UploadFileResult<EdgeInfoAzure> dto) {
-        log.debug("Upload the key result and EDGE node info for user {}: {}", dto.getUser(), dto);
-		requestId.checkAndRemove(dto.getRequestId());
-        keyUploaderCallback.handleCallback(dto.getStatus(), dto.getUser(), dto.getEdgeInfo());
-
-        return Response.ok().build();
-
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/base/EdgeCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/base/EdgeCallback.java
deleted file mode 100644
index 48ca592..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/base/EdgeCallback.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.base;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.service.ExploratoryService;
-import com.epam.dlab.backendapi.service.ReuploadKeyService;
-import com.epam.dlab.backendapi.service.SecurityService;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.model.ResourceData;
-import com.google.inject.Inject;
-import lombok.extern.slf4j.Slf4j;
-
-import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-
-@Slf4j
-public class EdgeCallback {
-	@Inject
-	private KeyDAO keyDAO;
-	@Inject
-	private ExploratoryService exploratoryService;
-	@Inject
-	private SecurityService securityService;
-	@Inject
-	private ReuploadKeyService reuploadKeyService;
-
-	protected EdgeCallback() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	protected void handleEdgeCallback(String user, String status) {
-		EdgeInfo edgeInfo = keyDAO.getEdgeInfo(user);
-		log.debug("Current status of edge node for user {} is {}", user,
-				UserInstanceStatus.of(edgeInfo.getEdgeStatus()));
-
-		try {
-			if (UserInstanceStatus.of(status) == UserInstanceStatus.TERMINATED) {
-				log.debug("Removing key for user {}", user);
-				keyDAO.deleteKey(user);
-				keyDAO.removeEdge(user);
-			}
-			log.debug("Updating the status of EDGE node for user {} to {}", user, status);
-			keyDAO.updateEdgeStatus(user, status);
-
-		} catch (DlabException e) {
-			log.error("Could not update status of EDGE node for user {} to {}", user, status, e);
-			throw new DlabException(String.format("Could not update status of EDGE node to %s: %s",
-					status, e.getLocalizedMessage()), e);
-		}
-		if (UserInstanceStatus.of(status) == RUNNING && edgeInfo.isReuploadKeyRequired()) {
-			ResourceData resourceData = ResourceData.edgeResource(edgeInfo.getInstanceId());
-			UserInfo userInfo = securityService.getUserInfoOffline(user);
-			reuploadKeyService.reuploadKeyAction(userInfo, resourceData);
-		}
-	}
-
-
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/base/KeyUploaderCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/base/KeyUploaderCallback.java
deleted file mode 100644
index 5c6e81d..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/base/KeyUploaderCallback.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.base;
-
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.dto.keyload.KeyLoadStatus;
-import com.epam.dlab.exceptions.DlabException;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-
-@Singleton
-@Slf4j
-public class KeyUploaderCallback {
-    @Inject
-    private KeyDAO keyDAO;
-
-    public void handleCallback(String status, String user, EdgeInfo edgeInfo) {
-
-        boolean isSuccess = UserInstanceStatus.of(status) == UserInstanceStatus.RUNNING;
-        try {
-            keyDAO.updateKey(user, KeyLoadStatus.getStatus(isSuccess));
-            if (isSuccess) {
-                keyDAO.updateEdgeInfo(user, edgeInfo);
-            } else {
-                UserInstanceStatus instanceStatus = UserInstanceStatus.of(keyDAO.getEdgeStatus(user));
-                if (instanceStatus == null) {
-                    log.debug("Updating the key status for user {} to error", user);
-                    keyDAO.updateKey(user, "error");
-                } else {
-                    keyDAO.updateEdgeStatus(user, status);
-                }
-            }
-        } catch (DlabException e) {
-            log.error("Could not upload the key result and create EDGE node for user {}", user, e);
-            throw new DlabException("Could not upload the key result and create EDGE node for user " + user + ": " + e.getLocalizedMessage(), e);
-        }
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/gcp/EdgeCallbackGcp.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/gcp/EdgeCallbackGcp.java
deleted file mode 100644
index 57c521f..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/gcp/EdgeCallbackGcp.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.gcp;
-
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.callback.base.EdgeCallback;
-import com.epam.dlab.dto.base.keyload.UploadFileResult;
-import com.epam.dlab.dto.gcp.edge.EdgeInfoGcp;
-import com.epam.dlab.rest.contracts.ApiCallbacks;
-import com.google.inject.Inject;
-import lombok.extern.slf4j.Slf4j;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-@Path("/infrastructure/edge")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class EdgeCallbackGcp extends EdgeCallback {
-
-	@Inject
-	private RequestId requestId;
-
-	public EdgeCallbackGcp() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	/**
-	 * Stores the result of the upload the user key.
-	 *
-	 * @param dto result of the upload the user key.
-	 * @return 200 OK
-	 */
-	@POST
-	@Path(ApiCallbacks.STATUS_URI)
-	public Response status(UploadFileResult<EdgeInfoGcp> dto) {
-		requestId.checkAndRemove(dto.getRequestId());
-		handleEdgeCallback(dto.getUser(), dto.getStatus());
-		return Response.ok().build();
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/gcp/KeyUploaderCallbackGcp.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/gcp/KeyUploaderCallbackGcp.java
deleted file mode 100644
index 3a2ce34..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/gcp/KeyUploaderCallbackGcp.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.callback.gcp;
-
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.callback.base.KeyUploaderCallback;
-import com.epam.dlab.dto.base.keyload.UploadFileResult;
-import com.epam.dlab.dto.gcp.edge.EdgeInfoGcp;
-import com.google.inject.Inject;
-import lombok.extern.slf4j.Slf4j;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-@Path("/user/access_key")
-@Consumes(MediaType.APPLICATION_JSON)
-@Produces(MediaType.APPLICATION_JSON)
-@Slf4j
-public class KeyUploaderCallbackGcp {
-
-	@Inject
-	private KeyUploaderCallback keyUploaderCallback;
-
-	@Inject
-	private RequestId requestId;
-
-	public KeyUploaderCallbackGcp() {
-		log.info("{} is initialized", getClass().getSimpleName());
-	}
-
-	/**
-	 * Stores the result of the upload the user key.
-	 *
-	 * @param dto result of the upload the user key.
-	 * @return 200 OK
-	 */
-	@POST
-	@Path("/callback")
-	public Response loadKeyResponse(UploadFileResult<EdgeInfoGcp> dto) {
-		log.debug("Upload the key result and EDGE node info for user {}: {}", dto.getUser(), dto);
-		requestId.checkAndRemove(dto.getRequestId());
-		keyUploaderCallback.handleCallback(dto.getStatus(), dto.getUser(), dto.getEdgeInfo());
-
-		return Response.ok().build();
-
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/BillingFilter.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/BillingFilter.java
index 60d9c1a..52363a8 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/BillingFilter.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/BillingFilter.java
@@ -20,29 +20,39 @@
 package com.epam.dlab.backendapi.resources.dto;
 
 import com.epam.dlab.dto.UserInstanceStatus;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Data;
+import lombok.NoArgsConstructor;
+import lombok.NonNull;
 
 import java.util.Collections;
 import java.util.List;
 
 @Data
-public abstract class BillingFilter {
-	@JsonProperty
-	protected List<String> user;
+@NoArgsConstructor
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class BillingFilter {
+	@NonNull
+	private List<String> users;
+	@NonNull
 	@JsonProperty("dlab_id")
-	protected String dlabId;
-	@JsonProperty("resource_type")
-	protected List<String> resourceType;
+	private String dlabId;
+	@NonNull
 	@JsonProperty("date_start")
-	protected String dateStart;
+	private String dateStart;
+	@NonNull
 	@JsonProperty("date_end")
-	protected String dateEnd;
-	@JsonProperty("status")
-	protected List<UserInstanceStatus> statuses = Collections.emptyList();
-
-	@JsonProperty("project")
-	protected List<String> projects;
-
-	public abstract List<String> getShapes();
+	private String dateEnd;
+	@NonNull
+	@JsonProperty("resource_type")
+	private List<String> resourceTypes;
+	@NonNull
+	private List<UserInstanceStatus> statuses = Collections.emptyList();
+	@NonNull
+	private List<String> projects;
+	@NonNull
+	private List<String> products;
+	@NonNull
+	private List<String> shapes;
 }
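
With the cloud-specific subclasses gone, one JSON shape serves every provider. A self-contained deserialization sketch under that assumption (payload values illustrative; exception handling omitted):

    import com.fasterxml.jackson.databind.ObjectMapper;

    String json = "{"
            + "\"users\":[\"test_user\"],"
            + "\"dlab_id\":\"dlab-nb\","
            + "\"date_start\":\"2020-01-01\","
            + "\"date_end\":\"2020-01-31\","
            + "\"resource_type\":[\"EXPLORATORY\"],"
            + "\"statuses\":[],"
            + "\"projects\":[\"test_project\"],"
            + "\"products\":[],"
            + "\"shapes\":[]"
            + "}";
    BillingFilter filter = new ObjectMapper().readValue(json, BillingFilter.class);
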
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/aws/AwsBillingFilter.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java
similarity index 69%
rename from services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/aws/AwsBillingFilter.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java
index c6cc336..9871918 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/aws/AwsBillingFilter.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java
@@ -17,27 +17,19 @@
  * under the License.
  */
 
-package com.epam.dlab.backendapi.resources.dto.aws;
+package com.epam.dlab.backendapi.resources.dto;
 
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
+import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Data;
 
 import java.util.List;
 
-/**
- * Stores info about billing report filter.
- */
 @Data
-public class AwsBillingFilter extends BillingFilter {
-	@JsonProperty
-	private List<String> product;
-	@JsonProperty
-	private List<String> shape;
-
-
-	@Override
-	public List<String> getShapes() {
-		return shape;
-	}
+public class ComputationalTemplatesDTO {
+    private final List<FullComputationalTemplate> templates;
+    @JsonProperty("user_computations")
+    private final List<String> userComputations;
+    @JsonProperty("project_computations")
+    private final List<String> projectComputations;
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryActionFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryActionFormDTO.java
index b953be3..d3361df 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryActionFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryActionFormDTO.java
@@ -19,27 +19,26 @@
 
 package com.epam.dlab.backendapi.resources.dto;
 
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
 import org.hibernate.validator.constraints.NotBlank;
 
-/** Stores info about the action of exploratory notebook. 
- * */
+/**
+ * Stores info about an action on the exploratory resource.
+ */
+@Data
+@AllArgsConstructor
+@NoArgsConstructor
+@JsonIgnoreProperties(ignoreUnknown = true)
 public class ExploratoryActionFormDTO {
     @NotBlank
     @JsonProperty("notebook_instance_name")
     private String notebookInstanceName;
 
-    /** Returns the name of notebook instance. */
-    public String getNotebookInstanceName() {
-        return notebookInstanceName;
-    }
-
-    @Override
-    public String toString() {
-    	return MoreObjects.toStringHelper(this)
-        .add("notebookInstanceName", notebookInstanceName)
-        .toString();
-    }
+    @NotBlank
+    @JsonProperty("project_name")
+    private String projectName;
 }
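
Action requests must now name the project as well; both fields are @NotBlank, so omitting project_name fails bean validation. An illustrative request body (values assumed):

    String body = "{"
            + "\"notebook_instance_name\":\"nb-instance-1\","
            + "\"project_name\":\"test_project\""
            + "}";
    ExploratoryActionFormDTO dto =
            new ObjectMapper().readValue(body, ExploratoryActionFormDTO.class);
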
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/Project.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryCreatePopUp.java
similarity index 71%
rename from services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/Project.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryCreatePopUp.java
index 0b40235..d061204 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/documents/Project.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryCreatePopUp.java
@@ -17,25 +17,19 @@
  * under the License.
  */
 
-package com.epam.dlab.billing.gcp.documents;
+package com.epam.dlab.backendapi.resources.dto;
 
+import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Data;
-import org.springframework.data.mongodb.core.mapping.Document;
 
 import java.util.List;
+import java.util.Map;
 
-@Document(collection = "Projects")
 @Data
-public class Project {
-
-	@JsonProperty("name")
-	private String name;
-	private List<Endpoint> endpoints;
-
-
-	@Data
-	public class Endpoint {
-		private final String name;
-	}
+public class ExploratoryCreatePopUp {
+    @JsonProperty("user_projects")
+    private final List<ProjectDTO> userProjects;
+    @JsonProperty("project_exploratories")
+    private final Map<String, List<String>> projectExploratories;
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
index 9c3eb30..14193f2 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
@@ -27,11 +27,13 @@
 @Data
 @ToString
 public class ExploratoryImageCreateFormDTO {
-
-	@NotBlank
-	@JsonProperty("exploratory_name")
-	private String notebookName;
-	@NotBlank
-	private final String name;
-	private final String description;
+    @NotBlank
+    private final String name;
+    @NotBlank
+    @JsonProperty("exploratory_name")
+    private String notebookName;
+    @NotBlank
+    @JsonProperty("project_name")
+    private String projectName;
+    private final String description;
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java
index b7f9362..17e7b91 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java
@@ -20,13 +20,16 @@
 package com.epam.dlab.backendapi.resources.dto;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
+import lombok.Builder;
+import lombok.Data;
 
 import java.util.List;
 
 /**
  * Stores the health statuses for environment resources.
  */
+@Data
+@Builder
 public class HealthStatusPageDTO {
 	@JsonProperty
 	private String status;
@@ -37,131 +40,11 @@
 	@JsonProperty
 	private boolean admin;
 	@JsonProperty
+	private boolean projectAdmin;
+	@JsonProperty
 	private int billingQuoteUsed;
 	@JsonProperty
 	private int billingUserQuoteUsed;
 	@JsonProperty
 	private boolean projectAssigned;
-
-	/**
-	 * Return the status of environment.
-	 */
-	public String getStatus() {
-		return status;
-	}
-
-	/**
-	 * Set the status of environment.
-	 */
-	public void setStatus(HealthStatusEnum status) {
-		this.status = status == null ? null : status.toString();
-	}
-
-	/**
-	 * Set the status of environment.
-	 */
-	public void setStatus(String status) {
-		this.status = status;
-	}
-
-	public void setBillingEnabled(boolean billingEnabled) {
-		this.billingEnabled = billingEnabled;
-	}
-
-
-	/**
-	 * Set the status of environment.
-	 */
-	public HealthStatusPageDTO withStatus(String status) {
-		setStatus(status);
-		return this;
-	}
-
-	/**
-	 * Set the status of environment.
-	 */
-	public HealthStatusPageDTO withStatus(HealthStatusEnum status) {
-		setStatus(status);
-		return this;
-	}
-
-	public HealthStatusPageDTO withProjectAssinged(boolean isProjectAssigned) {
-		this.projectAssigned = isProjectAssigned;
-		return this;
-	}
-
-	/**
-	 * Return the list of resources.
-	 */
-	public List<HealthStatusResource> getListResources() {
-		return listResources;
-	}
-
-	/**
-	 * Set the list of resources.
-	 */
-	public void setListResources(List<HealthStatusResource> listResources) {
-		this.listResources = listResources;
-	}
-
-	/**
-	 * Set the list of resources.
-	 */
-	public HealthStatusPageDTO withListResources(List<HealthStatusResource> listResources) {
-		setListResources(listResources);
-		return this;
-	}
-
-	/**
-	 * Set billing enabled flag
-	 */
-	public HealthStatusPageDTO withBillingEnabled(boolean billingEnabled) {
-		setBillingEnabled(billingEnabled);
-		return this;
-	}
-
-	@Override
-	public String toString() {
-		return MoreObjects.toStringHelper(this)
-				.add("status", status)
-				.add("listResources", listResources)
-				.add("billingEnabled", billingEnabled)
-				.add("admin", admin)
-				.toString();
-	}
-
-	public HealthStatusPageDTO withAdmin(boolean isAdmin) {
-		this.admin = isAdmin;
-		return this;
-	}
-
-	public HealthStatusPageDTO withBillingQuoteUsed(int billingQuoteUsedPct) {
-		this.billingQuoteUsed = billingQuoteUsedPct;
-		return this;
-	}
-
-	public HealthStatusPageDTO withBillingUserQuoteUsed(int billingUserQuoteUsed) {
-		this.billingUserQuoteUsed = billingUserQuoteUsed;
-		return this;
-	}
-
-	public boolean isBillingEnabled() {
-		return billingEnabled;
-	}
-
-	public boolean isAdmin() {
-		return admin;
-	}
-
-	public boolean isProjectAssigned() {
-		return projectAssigned;
-	}
-
-	public int getBillingQuoteUsed() {
-		return billingQuoteUsed;
-	}
-
-	public int getBillingUserQuoteUsed() {
-		return billingUserQuoteUsed;
-	}
 }
\ No newline at end of file
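
Callers of the removed with*/setter chain are expected to migrate to the Lombok-generated builder; roughly (values illustrative, java.util.Collections assumed imported):

    HealthStatusPageDTO page = HealthStatusPageDTO.builder()
            .status("ok")
            .listResources(Collections.emptyList())
            .billingEnabled(true)
            .admin(false)
            .projectAdmin(true)          // flag introduced in this change
            .billingQuoteUsed(42)
            .billingUserQuoteUsed(7)
            .projectAssigned(true)
            .build();
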
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ImageInfoRecord.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ImageInfoRecord.java
index d430701..ed722ee 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ImageInfoRecord.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ImageInfoRecord.java
@@ -30,6 +30,7 @@
 	private final String description;
 	private final String project;
 	private final String endpoint;
+	private final String user;
 	private final String application;
 	private final String fullName;
 	private final ImageStatus status;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
index 1a3b8a8..c2b8d1a 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
@@ -39,6 +39,9 @@
     @JsonProperty("computational_name")
     private String computationalName;
 
+    @JsonProperty("project_name")
+    private String project;
+
     @NotEmpty
     @JsonProperty
     private List<LibInstallDTO> libs;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectActionFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectActionFormDTO.java
index 08c2aca..ccdd3c4 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectActionFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectActionFormDTO.java
@@ -22,10 +22,15 @@
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Data;
 
+import javax.validation.constraints.NotNull;
+import java.util.List;
+
 @Data
 public class ProjectActionFormDTO {
+	@NotNull
 	@JsonProperty("project_name")
 	private final String projectName;
+	@NotNull
 	@JsonProperty("endpoint")
-	private final String endpoint;
+	private final List<String> endpoints;
 }
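
Note the JSON key stays "endpoint" while its value becomes an array, so one action request can now target several endpoints (illustrative payload; endpoint names assumed):

    // Before: {"project_name":"test_project","endpoint":"local"}
    // After:
    String body = "{"
            + "\"project_name\":\"test_project\","
            + "\"endpoint\":[\"local\",\"gcp-west\"]"
            + "}";
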
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectInfrastructureInfo.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectInfrastructureInfo.java
index 950893e..b9dfd89 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectInfrastructureInfo.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ProjectInfrastructureInfo.java
@@ -19,11 +19,14 @@
 
 package com.epam.dlab.backendapi.resources.dto;
 
+import com.epam.dlab.backendapi.domain.BillingReport;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.AllArgsConstructor;
 import lombok.ToString;
 import org.bson.Document;
 
+import java.util.List;
 import java.util.Map;
 
 @AllArgsConstructor
@@ -37,4 +40,8 @@
 	private Map<String, Map<String, String>> shared;
 	@JsonProperty
 	private Iterable<Document> exploratory;
+	@JsonProperty
+	private List<BillingReport> exploratoryBilling;
+	@JsonProperty
+	private List<EndpointDTO> endpoints;
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
index f817c4e..ff6edb6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
@@ -30,6 +30,10 @@
     private String notebookName;
 
     @NotBlank
+    @JsonProperty("project_name")
+    private String projectName;
+
+    @NotBlank
     @JsonProperty
     private String group;
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserResourceInfo.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserResourceInfo.java
index 8b4eeb6..ea1198e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserResourceInfo.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserResourceInfo.java
@@ -54,6 +54,9 @@
 	@JsonProperty("public_ip")
 	private String ip;
 
+	@JsonProperty("cloud_provider")
+	private String cloudProvider;
+
 
 	public UserResourceInfo withResourceType(ResourceEnum resourceType) {
 		setResourceType(resourceType);
@@ -94,4 +97,9 @@
 		setProject(project);
 		return this;
 	}
+
+	public UserResourceInfo withCloudProvider(String cloudProvider) {
+		setCloudProvider(cloudProvider);
+		return this;
+	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
index 21ce26d..5c90602 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
@@ -18,6 +18,7 @@
  */
 package com.epam.dlab.backendapi.resources.dto;
 
+import com.epam.dlab.cloud.CloudProvider;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Getter;
@@ -31,10 +32,11 @@
 @ToString
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class UserRoleDto {
-
 	@JsonProperty("_id")
 	private String id;
 	private String description;
+	private Type type;
+	private CloudProvider cloud;
 	private Set<String> pages;
 	private Set<String> computationals;
 	private Set<String> exploratories;
@@ -42,4 +44,12 @@
 	private Set<String> exploratoryShapes;
 	private Set<String> groups;
 
+	private enum Type {
+		NOTEBOOK,
+		COMPUTATIONAL,
+		NOTEBOOK_SHAPE,
+		COMPUTATIONAL_SHAPE,
+		BILLING,
+		ADMINISTRATION
+	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/gcp/GcpBillingFilter.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/gcp/GcpBillingFilter.java
deleted file mode 100644
index 3d855f2..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/gcp/GcpBillingFilter.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.dto.gcp;
-
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import lombok.Data;
-
-import java.util.List;
-
-@Data
-@JsonIgnoreProperties(ignoreUnknown = true)
-public class GcpBillingFilter extends BillingFilter {
-
-    @JsonProperty
-    private List<String> shape;
-    @JsonProperty
-    private List<String> product;
-
-    @Override
-    public List<String> getShapes() {
-        return shape;
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/BillingResourceGcp.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/BillingResourceGcp.java
deleted file mode 100644
index 7195c75..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/BillingResourceGcp.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources.gcp;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.resources.dto.gcp.GcpBillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
-import org.bson.Document;
-
-import javax.validation.Valid;
-import javax.validation.constraints.NotNull;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-@Path("/billing")
-public class BillingResourceGcp {
-
-    @Inject
-    private BillingService billingService;
-
-    /**
-     * Returns the billing report.
-     *
-     * @param userInfo user info.
-     * @param formDTO  filter for report data.
-     */
-    @POST
-    @Path("/report")
-    @Produces(MediaType.APPLICATION_JSON)
-    @SuppressWarnings("unchecked")
-    public Document getBillingReport(@Auth UserInfo userInfo, @Valid @NotNull GcpBillingFilter formDTO) {
-        return billingService.getBillingReport(userInfo, formDTO);
-    }
-
-    /**
-     * Returns the billing report in csv file.
-     *
-     * @param userInfo user info.
-     * @param formDTO  filter for report data.
-     */
-
-    @POST
-    @Path("/report/download")
-    @Produces(MediaType.APPLICATION_OCTET_STREAM)
-    @SuppressWarnings("unchecked")
-    public Response downloadBillingReport(@Auth UserInfo userInfo, @Valid @NotNull GcpBillingFilter formDTO) {
-        return Response.ok(billingService.downloadReport(userInfo, formDTO))
-                .header(HttpHeaders.CONTENT_DISPOSITION,
-                        "attachment; filename=\"" + billingService.getReportFileName(userInfo, formDTO) + "\"")
-                .build();
-    }
-
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
index ecacf84..087330a 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
@@ -39,7 +39,13 @@
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -50,18 +56,28 @@
 /**
  * Provides the REST API for the computational resource on GCP.
  */
-@Path("/infrastructure_provision/computational_resources")
+@Path("/gcp/infrastructure_provision/computational_resources")
 @Consumes(MediaType.APPLICATION_JSON)
 @Produces(MediaType.APPLICATION_JSON)
 @Slf4j
 public class ComputationalResourceGcp implements ComputationalAPI {
+	private final SelfServiceApplicationConfiguration configuration;
+	private final ComputationalService computationalService;
 
 	@Inject
-	private SelfServiceApplicationConfiguration configuration;
-	@Inject
-	private ComputationalService computationalService;
+	public ComputationalResourceGcp(SelfServiceApplicationConfiguration configuration, ComputationalService computationalService) {
+		this.configuration = configuration;
+		this.computationalService = computationalService;
+	}
 
 
+	@GET
+	@Path("/{project}/{endpoint}/templates")
+	public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+								 @PathParam("endpoint") String endpoint) {
+		return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+	}
+
 	/**
 	 * Asynchronously creates Dataproc cluster
 	 *
@@ -134,13 +150,14 @@
 	 * @return 200 OK if operation is successfully triggered
 	 */
 	@DELETE
-	@Path("/{exploratoryName}/{computationalName}/terminate")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
 	public Response terminate(@Auth UserInfo userInfo,
+							  @PathParam("projectName") String projectName,
 							  @PathParam("exploratoryName") String exploratoryName,
 							  @PathParam("computationalName") String computationalName) {
 		log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+		computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -156,14 +173,15 @@
 	@DELETE
 	@Path("/{project}/{exploratoryName}/{computationalName}/stop")
 	public Response stop(@Auth UserInfo userInfo,
+						 @PathParam("project") String project,
 						 @PathParam("exploratoryName") String exploratoryName,
 						 @PathParam("computationalName") String computationalName) {
 		log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+		computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
 
 	/**
 	 * Sends request to provisioning service for starting the computational resource for user.
@@ -187,22 +205,24 @@
 	}
 
 	@PUT
-	@Path("dataengine/{exploratoryName}/{computationalName}/config")
+	@Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+										   @PathParam("projectName") String projectName,
 										   @PathParam("exploratoryName") String exploratoryName,
 										   @PathParam("computationalName") String computationalName,
 										   @Valid @NotNull List<ClusterConfig> config) {
 
-		computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+		computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("{exploratoryName}/{computationalName}/config")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
-		return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+		return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
 	}
 
 	private void validate(@Auth UserInfo userInfo, GcpComputationalCreateForm formDTO) {
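
The resource moves under /gcp and its item paths gain a project segment. A hedged JAX-RS client sketch against the new routes (base URL, project, endpoint, and resource names are assumptions; imports from javax.ws.rs.client and javax.ws.rs.core):

    Client client = ClientBuilder.newClient();
    String base = "https://ssn.example.com/api/gcp/infrastructure_provision/computational_resources";

    // New: GET /{project}/{endpoint}/templates
    Response templates = client.target(base)
            .path("test_project").path("local").path("templates")
            .request(MediaType.APPLICATION_JSON)
            .get();

    // Changed: DELETE /{projectName}/{exploratoryName}/{computationalName}/terminate
    Response terminated = client.target(base)
            .path("test_project").path("nb-1").path("spark-1").path("terminate")
            .request()
            .delete();
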
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java
index 206b143..e5343dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java
@@ -23,6 +23,7 @@
 import com.google.common.base.MoreObjects.ToStringHelper;
 
 import javax.annotation.Nonnull;
+import java.util.Comparator;
 import java.util.Objects;
 import java.util.Set;
 
@@ -69,10 +70,6 @@
 		this.users = users;
 	}
 
-	UserRole(RoleType type, String name, Set<String> groups, Set<String> users) {
-		this(null, type, name, groups, users);
-	}
-
 	/**
 	 * Return the type of role.
 	 */
@@ -107,8 +104,10 @@
 
 	@Override
 	public int compareTo(@Nonnull UserRole o) {
-		int result = type.compareTo(o.type);
-		return (result == 0 ? name.compareTo(o.name) : result);
+		return Comparator.comparing(UserRole::getType)
+				.thenComparing(UserRole::getName)
+				.thenComparing(UserRole::getId, Comparator.nullsLast(String::compareToIgnoreCase))
+				.compare(this, o);
 	}
 
 	private ToStringHelper toStringHelper(Object self) {
@@ -124,7 +123,7 @@
 		if (this == o) return true;
 		if (o == null || getClass() != o.getClass()) return false;
 		UserRole userRole = (UserRole) o;
-		return this.type.equals(userRole.getType()) && this.name.equals(userRole.getName());
+		return this.id.equals(userRole.getId()) && this.type.equals(userRole.getType()) && this.name.equals(userRole.getName());
 	}
 
 	@Override
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java
index 411f798..9be9578 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java
@@ -28,7 +28,15 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
 
 /**
  * Provides user roles access to features.
@@ -45,6 +53,7 @@
 	 * Node name of user.
 	 */
 	private static final String USERS = "users";
+	private static final String PROJECT_ADMIN_ROLE_NAME = "projectAdmin";
 	private static final String ADMIN_ROLE_NAME = "admin";
 	/**
 	 * Single instance of the user roles.
@@ -95,10 +104,22 @@
 		return checkAccess(userInfo, type, name, true, roles);
 	}
 
+	public static boolean isProjectAdmin(UserInfo userInfo) {
+		final List<UserRole> roles = UserRoles.getRoles();
+		return roles == null || roles.stream().anyMatch(r -> PROJECT_ADMIN_ROLE_NAME.equalsIgnoreCase(r.getId()) &&
+				(userRoles.hasAccessByGroup(userInfo, userInfo.getRoles(), r.getGroups()) || userRoles.hasAccessByUserName(userInfo, r)));
+	}
+
+	public static boolean isProjectAdmin(UserInfo userInfo, Set<String> groups) {
+		final List<UserRole> roles = UserRoles.getRoles();
+		return roles == null || roles.stream().anyMatch(r -> PROJECT_ADMIN_ROLE_NAME.equalsIgnoreCase(r.getId()) &&
+				(userRoles.hasAccessByGroup(userInfo, userInfo.getRoles(), retainGroups(r.getGroups(), groups)) || userRoles.hasAccessByUserName(userInfo, r)));
+	}
+
 	public static boolean isAdmin(UserInfo userInfo) {
 		final List<UserRole> roles = UserRoles.getRoles();
 		return roles == null || roles.stream().anyMatch(r -> ADMIN_ROLE_NAME.equalsIgnoreCase(r.getId()) &&
-				(userRoles.hasAccessByGroup(userInfo, r, userInfo.getRoles()) || userRoles.hasAccessByUserName(userInfo, r)));
+				(userRoles.hasAccessByGroup(userInfo, userInfo.getRoles(), r.getGroups()) || userRoles.hasAccessByUserName(userInfo, r)));
 	}
 
 	/**
@@ -181,12 +202,16 @@
 	 *
 	 * @param type type of role.
 	 * @param name the name of role.
+	 * @return the set of groups assigned to the matching roles
 	 */
-	private UserRole get(RoleType type, String name) {
-		UserRole item = new UserRole(type, name, null, null);
+	private Set<String> getGroups(RoleType type, String name) {
 		synchronized (roles) {
-			int i = Collections.binarySearch(roles, item);
-			return (i < 0 ? null : roles.get(i));
+			return roles
+					.stream()
+					.filter(r -> type == r.getType() && name.equalsIgnoreCase(r.getName()))
+					.map(UserRole::getGroups)
+					.flatMap(Collection::stream)
+					.collect(Collectors.toSet());
 		}
 	}
 
@@ -233,17 +258,18 @@
 		}
 		LOGGER.trace("Check access for user {} with groups {} to {}/{}", userInfo.getName(), userInfo.getRoles(),
 				type, name);
-		UserRole role = get(type, name);
-		if (role == null) {
+		Set<String> groups = getGroups(type, name);
+		if (groups == null || groups.isEmpty()) {
 			return checkDefault(useDefault);
 		}
-		if (hasAccessByGroup(userInfo, role, roles)) return true;
+		if (hasAccessByGroup(userInfo, roles, groups)) {
+			return true;
+		}
 		LOGGER.trace("Access denied for user {} to {}/{}", userInfo.getName(), type, name);
 		return false;
 	}
 
-	private boolean hasAccessByGroup(UserInfo userInfo, UserRole role, Collection<String> userRoles) {
-		Set<String> groups = role.getGroups();
+	private boolean hasAccessByGroup(UserInfo userInfo, Collection<String> userRoles, Collection<String> groups) {
 		if (groups != null) {
 			if (groups.contains(ANY_USER)) {
 				return true;
@@ -255,7 +281,7 @@
 				}
 			}
 
-			final Optional<String> group = role.getGroups()
+			final Optional<String> group = groups
 					.stream()
 					.filter(g -> userGroups.getOrDefault(g, Collections.emptySet()).contains(userInfo.getName().toLowerCase()))
 					.findAny();
@@ -287,12 +313,20 @@
 		}
 	}
 
+	private static Set<String> retainGroups(Set<String> groups1, Set<String> groups2) {
+		Set<String> result = groups2
+				.stream()
+				.map(String::toLowerCase)
+				.collect(Collectors.toSet());
+		result.retainAll(groups1);
+
+		return result;
+	}
+
 	@Override
 	public String toString() {
 		return MoreObjects.toStringHelper(roles)
 				.addValue(roles)
 				.toString();
 	}
-
-
 }
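
The old binary search assumed at most one role per (type, name); the stream version unions groups across duplicate role entries. A self-contained sketch of the new semantics (role names illustrative):

    import java.util.*;
    import java.util.stream.*;

    // Two role documents with the same type/name now both contribute groups.
    List<Map.Entry<String, Set<String>>> roles = Arrays.asList(
            new AbstractMap.SimpleEntry<>("notebook/create", Collections.singleton("dev")),
            new AbstractMap.SimpleEntry<>("notebook/create", Collections.singleton("qa")));
    Set<String> groups = roles.stream()
            .filter(r -> "notebook/create".equalsIgnoreCase(r.getKey()))
            .map(Map.Entry::getValue)
            .flatMap(Collection::stream)
            .collect(Collectors.toSet());
    // groups == {dev, qa}; the removed get(type, name) would have found one entry at most.
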
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/schedulers/billing/BillingScheduler.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/schedulers/billing/BillingScheduler.java
new file mode 100644
index 0000000..45563a2
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/schedulers/billing/BillingScheduler.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.schedulers.billing;
+
+import com.epam.dlab.backendapi.schedulers.internal.Scheduled;
+import com.epam.dlab.backendapi.service.BillingService;
+import com.epam.dlab.backendapi.service.SecurityService;
+import com.google.inject.Inject;
+import lombok.extern.slf4j.Slf4j;
+import org.quartz.Job;
+import org.quartz.JobExecutionContext;
+
+@Scheduled("billingScheduler")
+@Slf4j
+public class BillingScheduler implements Job {
+
+    private final BillingService billingService;
+    private final SecurityService securityService;
+
+    @Inject
+    public BillingScheduler(BillingService billingService, SecurityService securityService) {
+        this.billingService = billingService;
+        this.securityService = securityService;
+    }
+
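+    /**
+     * Quartz callback: pulls remote billing data on behalf of the "admin" service account.
+     * Failures are logged and swallowed rather than rethrown.
+     */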
+    @Override
+    public void execute(JobExecutionContext jobExecutionContext) {
+        log.info("Trying to update billing");
+        try {
+            billingService.updateRemoteBillingData(securityService.getServiceAccountInfo("admin"));
+        } catch (Exception e) {
+            log.error("Something went wrong: {}", e.getMessage(), e);
+        }
+    }
+}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/schedulers/endpoint/CheckEndpointStatusScheduler.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/schedulers/endpoint/CheckEndpointStatusScheduler.java
new file mode 100644
index 0000000..5707553
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/schedulers/endpoint/CheckEndpointStatusScheduler.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.schedulers.endpoint;
+
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.schedulers.internal.Scheduled;
+import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.SecurityService;
+import com.google.inject.Inject;
+import lombok.extern.slf4j.Slf4j;
+import org.quartz.Job;
+import org.quartz.JobExecutionContext;
+
+@Scheduled("checkEndpointStatusScheduler")
+@Slf4j
+public class CheckEndpointStatusScheduler implements Job {
+
+    @Inject
+    private EndpointService endpointService;
+    @Inject
+    private SecurityService securityService;
+
+    @Override
+    public void execute(JobExecutionContext jobExecutionContext) {
+        UserInfo serviceUser = securityService.getServiceAccountInfo("admin");
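+        // Probe every registered endpoint and flip the stored status of those whose
+        // reachability no longer matches it.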
+        endpointService.getEndpoints().stream()
+                .filter(endpoint -> checkUrlWithStatus(serviceUser, endpoint))
+                .forEach(this::changeStatusToOpposite);
+    }
+
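+    /**
+     * Returns true when the endpoint's stored status contradicts the probe result:
+     * an ACTIVE endpoint that fails the URL check, or an INACTIVE one that is reachable again.
+     */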
+    private boolean checkUrlWithStatus(UserInfo serviceUser, EndpointDTO endpoint) {
+        try {
+            endpointService.checkUrl(serviceUser, endpoint.getUrl());
+        } catch (Exception e) {
+            log.warn("Failed to connect to endpoint {}, url: '{}'. {}", endpoint.getName(), endpoint.getUrl(), e.getMessage());
+            return endpoint.getStatus() == EndpointDTO.EndpointStatus.ACTIVE;
+        }
+        return endpoint.getStatus() == EndpointDTO.EndpointStatus.INACTIVE;
+    }
+
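+    // Invoked only for endpoints whose probe result contradicted the stored status, so a plain toggle suffices.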
+    private void changeStatusToOpposite(EndpointDTO endpoint) {
+        if (endpoint.getStatus() == EndpointDTO.EndpointStatus.ACTIVE) {
+            endpointService.updateEndpointStatus(endpoint.getName(), EndpointDTO.EndpointStatus.INACTIVE);
+        } else {
+            endpointService.updateEndpointStatus(endpoint.getName(), EndpointDTO.EndpointStatus.ACTIVE);
+        }
+    }
+}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/AccessKeyService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/AccessKeyService.java
index 145079e..c037285 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/AccessKeyService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/AccessKeyService.java
@@ -21,17 +21,8 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.KeysDTO;
-import com.epam.dlab.dto.keyload.KeyLoadStatus;
 
 public interface AccessKeyService {
 
-	KeyLoadStatus getUserKeyStatus(String user);
-
-	String uploadKey(UserInfo user, String keyContent, boolean isPrimaryUploading);
-
-	String recoverEdge(UserInfo userInfo);
-
-	String generateKey(UserInfo userInfo, boolean createEdge);
-
 	KeysDTO generateKeys(UserInfo userInfo);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java
index cced07c..b76b141 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java
@@ -20,82 +20,17 @@
 package com.epam.dlab.backendapi.service;
 
 import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.domain.BillingReport;
 import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.epam.dlab.exceptions.DlabException;
-import com.google.inject.Inject;
-import jersey.repackaged.com.google.common.collect.Lists;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
 
-import java.text.ParseException;
 import java.util.List;
 
-@Slf4j
-public abstract class BillingService<T extends BillingFilter> {
+public interface BillingService {
+    BillingReport getBillingReport(UserInfo userInfo, BillingFilter filter);
 
-    @Inject
-    private BillingDAO billingDAO;
+    String downloadReport(UserInfo userInfo, BillingFilter filter);
 
-    public Document getReport(UserInfo userInfo, T filter) {
-        log.trace("Get billing report for user {} with filter {}", userInfo.getName(), filter);
-        try {
-            return billingDAO.getReport(userInfo, filter);
-        } catch (RuntimeException t) {
-            log.error("Cannot load billing report for user {} with filter {}", userInfo.getName(), filter, t);
-            throw new DlabException("Cannot load billing report: " + t.getLocalizedMessage(), t);
-        }
-    }
+    BillingReport getExploratoryBillingData(String project, String endpoint, String exploratoryName, List<String> compNames);
 
-    protected String getValueOrEmpty(Document document, String key) {
-        String value = document.getString(key);
-        return value == null ? "" : value;
-    }
-
-    String getHeaders(boolean full) {
-        return CSVFormatter.formatLine(getHeadersList(full), CSVFormatter.SEPARATOR);
-    }
-
-    public Document getBillingReport(UserInfo userInfo, T filter) {
-        filter.getUser().replaceAll(s -> s.equalsIgnoreCase(BaseBillingDAO.SHARED_RESOURCE_NAME) ? null : s);
-        return getReport(userInfo, filter);
-    }
-
-    public byte[] downloadReport(UserInfo userInfo, T filter) {
-        return prepareReport(getReport(userInfo, filter)).getBytes();
-    }
-
-    String prepareReport(Document document) {
-        try {
-            StringBuilder builder =
-                    new StringBuilder(CSVFormatter.formatLine(Lists.newArrayList(getFirstLine(document)),
-                            CSVFormatter.SEPARATOR, '\"'));
-
-            Boolean full = (Boolean) document.get(BaseBillingDAO.FULL_REPORT);
-            builder.append(getHeaders(full));
-
-            @SuppressWarnings("unchecked")
-            List<Document> items = (List<Document>) document.get(BaseBillingDAO.ITEMS);
-
-            items.forEach(d -> builder.append(getLine(full, d)));
-
-            builder.append(getTotal(full, document));
-
-            return builder.toString();
-        } catch (ParseException e) {
-            throw new DlabException("Cannot prepare CSV file", e);
-        }
-    }
-
-    public abstract String getFirstLine(Document document) throws ParseException;
-
-    public abstract List<String> getHeadersList(boolean full);
-
-    public abstract String getLine(boolean full, Document document);
-
-    public abstract String getTotal(boolean full, Document document);
-
-    public abstract String getReportFileName(UserInfo userInfo, T filter);
+    void updateRemoteBillingData(UserInfo userInfo);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
index 217e18e..4a6f392 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
@@ -21,16 +21,17 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.ComputationalCreateFormDTO;
+import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
-import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
-import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.computational.UserComputationalResource;
 
 import java.util.List;
 import java.util.Optional;
 
 public interface ComputationalService {
+	ComputationalTemplatesDTO getComputationalNamesAndTemplates(UserInfo user, String project, String endpoint);
+
 	/**
 	 * Asynchronously triggers creation of Spark cluster
 	 *
@@ -46,29 +47,25 @@
 	 * Asynchronously triggers termination of computational resources
 	 *
 	 * @param userInfo          user info of authenticated user
+	 * @param project           project name
 	 * @param exploratoryName   name of exploratory where to terminate computational resources with
 	 *                          <code>computationalName</code>
 	 * @param computationalName computational name
 	 */
-	void terminateComputational(UserInfo userInfo, String exploratoryName, String computationalName);
+	void terminateComputational(UserInfo userInfo, String project, String exploratoryName, String computationalName);
 
 	boolean createDataEngineService(UserInfo userInfo, ComputationalCreateFormDTO formDTO, UserComputationalResource
 			computationalResource, String project);
 
-	void stopSparkCluster(UserInfo userInfo, String exploratoryName, String computationalName);
+	void stopSparkCluster(UserInfo userInfo, String project, String exploratoryName, String computationalName);
 
 	void startSparkCluster(UserInfo userInfo, String exploratoryName, String computationalName, String project);
 
-	void updateSparkClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName,
+	void updateSparkClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName,
 								  List<ClusterConfig> config);
 
-	void updateComputationalsReuploadKeyFlag(String user, List<UserInstanceStatus> exploratoryStatuses,
-											 List<DataEngineType> computationalTypes,
-											 boolean reuploadKeyRequired,
-											 UserInstanceStatus... computationalStatuses);
-
-	Optional<UserComputationalResource> getComputationalResource(String user, String exploratoryName,
+	Optional<UserComputationalResource> getComputationalResource(String user, String project, String exploratoryName,
 																 String computationalName);
 
-	List<ClusterConfig> getClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName);
+	List<ClusterConfig> getClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EdgeService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EdgeService.java
deleted file mode 100644
index 55661c7..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EdgeService.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.dto.UserInstanceStatus;
-
-public interface EdgeService {
-	String start(UserInfo userInfo);
-
-	String stop(UserInfo userInfo);
-
-	String terminate(UserInfo userInfo);
-
-	void updateReuploadKeyFlag(String user, boolean reuploadKeyRequired, UserInstanceStatus... edgeStatuses);
-
-	String getEdgeInfo(UserInfo userInfo, String projectName);
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EndpointService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EndpointService.java
index aa0b2b7..abd28d6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EndpointService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EndpointService.java
@@ -19,15 +19,30 @@
 
 package com.epam.dlab.backendapi.service;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.EndpointResourcesDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.cloud.CloudProvider;
 
 import java.util.List;
 
 public interface EndpointService {
 	List<EndpointDTO> getEndpoints();
+
+	List<EndpointDTO> getEndpointsWithStatus(EndpointDTO.EndpointStatus status);
+
+	EndpointResourcesDTO getEndpointResources(String endpoint);
+
 	EndpointDTO get(String name);
 
-	void create(EndpointDTO endpointDTO);
+	void create(UserInfo userInfo, EndpointDTO endpointDTO);
 
-	void remove(String name);
+	void updateEndpointStatus(String name, EndpointDTO.EndpointStatus status);
+
+	void remove(UserInfo userInfo, String name, boolean withResources);
+
+	void removeEndpointInAllProjects(UserInfo userInfo, String endpointName, List<ProjectDTO> projects);
+
+	CloudProvider checkUrl(UserInfo userInfo, String url);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
index 65953cd..8abfdd3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
@@ -19,39 +19,28 @@
 
 package com.epam.dlab.backendapi.service;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.UserDTO;
 import com.epam.dlab.backendapi.resources.dto.UserResourceInfo;
 
 import java.util.List;
-import java.util.Set;
 
 public interface EnvironmentService {
-
-	Set<String> getUserNames();
-
 	List<UserDTO> getUsers();
 
-	List<UserResourceInfo> getAllEnv();
+	List<UserResourceInfo> getAllEnv(UserInfo user);
 
 	void stopAll();
 
-	void stopEnvironment(String user);
-
 	void stopEnvironmentWithServiceAccount(String user);
 
 	void stopProjectEnvironment(String project);
 
-	void stopEdge(String user);
+	void stopExploratory(UserInfo userInfo, String user, String project, String exploratoryName);
 
-	void stopExploratory(String user, String exploratoryName);
+	void stopComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName);
 
-	void stopComputational(String user, String exploratoryName, String computationalName);
+	void terminateExploratory(UserInfo userInfo, String user, String project, String exploratoryName);
 
-	void terminateAll();
-
-	void terminateEnvironment(String user);
-
-	void terminateExploratory(String user, String exploratoryName);
-
-	void terminateComputational(String user, String exploratoryName, String computationalName);
+	void terminateComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
index 76956c6..807df17 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
@@ -21,6 +21,8 @@
 
 
 import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.resources.dto.ExploratoryCreatePopUp;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
@@ -28,30 +30,31 @@
 
 import java.util.List;
 import java.util.Optional;
+import java.util.Set;
 
 public interface ExploratoryService {
 
-	String start(UserInfo userInfo, String exploratoryName, String project);
+    String start(UserInfo userInfo, String exploratoryName, String project);
 
-	String stop(UserInfo userInfo, String exploratoryName);
+    String stop(UserInfo userInfo, String project, String exploratoryName);
 
-	String terminate(UserInfo userInfo, String exploratoryName);
+    String terminate(UserInfo userInfo, String project, String exploratoryName);
 
-	String create(UserInfo userInfo, Exploratory exploratory, String project);
+    String create(UserInfo userInfo, Exploratory exploratory, String project);
 
-	void updateExploratoryStatuses(String user, UserInstanceStatus status);
+    void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status);
 
-	void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status);
+    void updateClusterConfig(UserInfo userInfo, String project, String exploratoryName, List<ClusterConfig> config);
 
-	void updateExploratoriesReuploadKeyFlag(String user, boolean reuploadKeyRequired,
-											UserInstanceStatus... exploratoryStatuses);
+    Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName);
 
-	List<UserInstanceDTO> getInstancesWithStatuses(String user, UserInstanceStatus exploratoryStatus,
-												   UserInstanceStatus computationalStatus);
+    Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName, boolean includeCompResources);
 
-	void updateClusterConfig(UserInfo userInfo, String exploratoryName, List<ClusterConfig> config);
+    List<UserInstanceDTO> findAll();
 
-	Optional<UserInstanceDTO> getUserInstance(String user, String exploratoryName);
+    List<UserInstanceDTO> findAll(Set<ProjectDTO> projects);
 
-	List<ClusterConfig> getClusterConfig(UserInfo user, String exploratoryName);
+    List<ClusterConfig> getClusterConfig(UserInfo user, String project, String exploratoryName);
+
+    ExploratoryCreatePopUp getUserInstances(UserInfo user);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
index 791cd05..604bdcf 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
@@ -27,13 +27,13 @@
 
 public interface ImageExploratoryService {
 
-	String createImage(UserInfo user, String exploratoryName, String imageName, String imageDescription);
+    String createImage(UserInfo user, String project, String exploratoryName, String imageName, String imageDescription);
 
-	void finishImageCreate(Image image, String exploratoryName, String newNotebookIp);
+    void finishImageCreate(Image image, String exploratoryName, String newNotebookIp);
 
-	List<ImageInfoRecord> getNotFailedImages(String user, String dockerImage, String project, String endpoint);
+    List<ImageInfoRecord> getNotFailedImages(String user, String dockerImage, String project, String endpoint);
 
-	ImageInfoRecord getImage(String user, String name);
+    ImageInfoRecord getImage(String user, String name, String project, String endpoint);
 
-	List<ImageInfoRecord> getImagesForProject(String project);
+    List<ImageInfoRecord> getImagesForProject(String project);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
index 7b5cd44..038a7b6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
@@ -24,10 +24,10 @@
 
 public interface InactivityService {
 
-	void updateRunningResourcesLastActivity();
+    void updateRunningResourcesLastActivity();
 
-	void updateLastActivityForExploratory(UserInfo userInfo, String exploratoryName, LocalDateTime lastActivity);
+    void updateLastActivityForExploratory(UserInfo userInfo, String exploratoryName, LocalDateTime lastActivity);
 
-	void updateLastActivityForComputational(UserInfo userInfo, String exploratoryName,
-											String computationalName, LocalDateTime lastActivity);
+    void updateLastActivityForComputational(UserInfo userInfo, String project, String exploratoryName,
+                                            String computationalName, LocalDateTime lastActivity);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java
index aa23d1d..ffb3531 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java
@@ -27,9 +27,9 @@
 import java.util.List;
 
 public interface InfrastructureInfoService {
-	List<ProjectInfrastructureInfo> getUserResources(String user);
+	List<ProjectInfrastructureInfo> getUserResources(UserInfo user);
 
-	HealthStatusPageDTO getHeathStatus(UserInfo user, boolean fullReport, boolean isAdmin);
+	HealthStatusPageDTO getHeathStatus(UserInfo user, boolean fullReport);
 
 	InfrastructureMetaInfoDTO getInfrastructureMetaInfo();
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
index 5b98293..bdd22f1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
@@ -27,12 +27,12 @@
 import java.util.List;
 
 public interface LibraryService {
-	List<Document> getLibs(String user, String exploratoryName, String computationalName);
+    List<Document> getLibs(String user, String project, String exploratoryName, String computationalName);
 
-	List<LibInfoRecord> getLibInfo(String user, String exploratoryName);
+    List<LibInfoRecord> getLibInfo(String user, String project, String exploratoryName);
 
-	String installComputationalLibs(UserInfo userInfo, String exploratoryName, String computationalName,
-									List<LibInstallDTO> libs);
+    String installComputationalLibs(UserInfo userInfo, String project, String exploratoryName, String computationalName,
+                                    List<LibInstallDTO> libs);
 
-	String installExploratoryLibs(UserInfo userInfo, String exploratoryName, List<LibInstallDTO> libs);
+    String installExploratoryLibs(UserInfo userInfo, String project, String exploratoryName, List<LibInstallDTO> libs);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
index 0241a68..5362dfc 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
@@ -21,7 +21,6 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
-import com.epam.dlab.backendapi.domain.ProjectManagingDTO;
 import com.epam.dlab.backendapi.domain.UpdateProjectDTO;
 
 import java.util.List;
@@ -29,11 +28,11 @@
 public interface ProjectService {
 	List<ProjectDTO> getProjects();
 
-	List<ProjectManagingDTO> getProjectsForManaging();
+	List<ProjectDTO> getProjects(UserInfo user);
 
 	List<ProjectDTO> getUserProjects(UserInfo userInfo, boolean active);
 
-	List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status);
+	List<ProjectDTO> getProjectsByEndpoint(String endpointName);
 
 	void create(UserInfo userInfo, ProjectDTO projectDTO);
 
@@ -41,19 +40,21 @@
 
 	void terminateEndpoint(UserInfo userInfo, String endpoint, String name);
 
-	void terminateProject(UserInfo userInfo, String name);
+	void terminateEndpoint(UserInfo userInfo, List<String> endpoints, String name);
 
 	void start(UserInfo userInfo, String endpoint, String name);
 
+	void start(UserInfo userInfo, List<String> endpoints, String name);
+
 	void stop(UserInfo userInfo, String endpoint, String name);
 
-	void stopWithResources(UserInfo userInfo, String projectName);
+	void stopWithResources(UserInfo userInfo, List<String> endpoints, String projectName);
 
-	void update(UserInfo userInfo, UpdateProjectDTO projectDTO);
-
-	void updateBudget(String project, Integer budget);
+	void update(UserInfo userInfo, UpdateProjectDTO projectDTO, String projectName);
 
 	void updateBudget(List<ProjectDTO> projects);
 
 	boolean isAnyProjectAssigned(UserInfo userInfo);
+
+	boolean checkExploratoriesAndComputationalProgress(String projectName, List<String> endpoints);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ReuploadKeyService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ReuploadKeyService.java
index 88f1e9e..0c2b199 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ReuploadKeyService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ReuploadKeyService.java
@@ -25,9 +25,5 @@
 
 public interface ReuploadKeyService {
 
-	String reuploadKey(UserInfo user, String keyContent);
-
-	void reuploadKeyAction(UserInfo userInfo, ResourceData resourceData);
-
 	void updateResourceData(ReuploadKeyStatusDTO dto);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
index 1059db0..7702601 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
@@ -25,57 +25,61 @@
 import java.util.List;
 
 public interface SchedulerJobService {
-	/**
-	 * Pulls out scheduler job data for user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user            user's name
-	 * @param exploratoryName name of exploratory resource
-	 * @return dto object
-	 */
-	SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String exploratoryName);
+    /**
+     * Fetches scheduler job data for user <code>user</code> and their exploratory <code>exploratoryName</code>
+     *
+     * @param user            user's name
+     * @param project         project name
+     * @param exploratoryName name of exploratory resource
+     * @return dto object
+     */
+    SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String project, String exploratoryName);
 
-	/**
-	 * Pulls out scheduler job data for computational resource <code>computationalName<code/> affiliated with
-	 * user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user              user's name
-	 * @param exploratoryName   name of exploratory resource
-	 * @param computationalName name of computational resource
-	 * @return dto object
-	 */
-	SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String exploratoryName,
-															  String computationalName);
+    /**
+     * Fetches scheduler job data for computational resource <code>computationalName</code> affiliated with
+     * user <code>user</code> and their exploratory <code>exploratoryName</code>
+     *
+     * @param user              user's name
+     * @param project           project name
+     * @param exploratoryName   name of exploratory resource
+     * @param computationalName name of computational resource
+     * @return dto object
+     */
+    SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String project, String exploratoryName,
+                                                              String computationalName);
 
-	/**
-	 * Updates scheduler job data for user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user            user's name
-	 * @param exploratoryName name of exploratory resource
-	 * @param dto             scheduler job data
-	 */
-	void updateExploratorySchedulerData(String user, String exploratoryName, SchedulerJobDTO dto);
+    /**
+     * Updates scheduler job data for user <code>user</code> and their exploratory <code>exploratoryName</code>
+     *
+     * @param user            user's name
+     * @param project         project name
+     * @param exploratoryName name of exploratory resource
+     * @param dto             scheduler job data
+     */
+    void updateExploratorySchedulerData(String user, String project, String exploratoryName, SchedulerJobDTO dto);
 
-	/**
-	 * Updates scheduler job data for computational resource <code>computationalName<code/> affiliated with
-	 * user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user              user's name
-	 * @param exploratoryName   name of exploratory resource
-	 * @param computationalName name of computational resource
-	 * @param dto               scheduler job data
-	 */
-	void updateComputationalSchedulerData(String user, String exploratoryName,
-										  String computationalName, SchedulerJobDTO dto);
+    /**
+     * Updates scheduler job data for computational resource <code>computationalName</code> affiliated with
+     * user <code>user</code> and their exploratory <code>exploratoryName</code>
+     *
+     * @param user              user's name
+     * @param project           project name
+     * @param exploratoryName   name of exploratory resource
+     * @param computationalName name of computational resource
+     * @param dto               scheduler job data
+     */
+    void updateComputationalSchedulerData(String user, String project, String exploratoryName,
+                                          String computationalName, SchedulerJobDTO dto);
 
-	void stopComputationalByScheduler();
+    void stopComputationalByScheduler();
 
-	void stopExploratoryByScheduler();
+    void stopExploratoryByScheduler();
 
-	void startExploratoryByScheduler();
+    void startExploratoryByScheduler();
 
-	void startComputationalByScheduler();
+    void startComputationalByScheduler();
 
-	void terminateExploratoryByScheduler();
+    void terminateExploratoryByScheduler();
 
 	void terminateComputationalByScheduler();
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ShapeFormat.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ShapeFormat.java
deleted file mode 100644
index e85a5b9..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ShapeFormat.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service;
-
-public interface ShapeFormat {
-    String format();
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java
index e9e3e47..94e89e3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java
@@ -18,6 +18,7 @@
  */
 package com.epam.dlab.backendapi.service;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 
 import java.util.List;
@@ -26,17 +27,10 @@
 public interface UserGroupService {
 
 	void createGroup(String group, Set<String> roleIds, Set<String> users);
-	void updateGroup(String group, Set<String> roleIds, Set<String> users);
 
-	void addUsersToGroup(String group, Set<String> users);
-
-	void updateRolesForGroup(String group, Set<String> roleIds);
-
-	void removeUserFromGroup(String group, String user);
-
-	void removeGroupFromRole(Set<String> groups, Set<String> roleIds);
+	void updateGroup(UserInfo user, String group, Set<String> roleIds, Set<String> users);
 
 	void removeGroup(String groupId);
 
-	List<UserGroupDto> getAggregatedRolesByGroup();
+	List<UserGroupDto> getAggregatedRolesByGroup(UserInfo user);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserResourceService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserResourceService.java
deleted file mode 100644
index 972e78f..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserResourceService.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-
-package com.epam.dlab.backendapi.service;
-
-import com.epam.dlab.dto.UserInstanceDTO;
-import com.epam.dlab.model.ResourceData;
-
-import java.util.List;
-
-public interface UserResourceService {
-
-	List<ResourceData> convertToResourceData(List<UserInstanceDTO> userInstances);
-
-	void updateReuploadKeyFlagForUserResources(String user, boolean reuploadKeyRequired);
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java
index ce9462f..92e0afb 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java
@@ -31,6 +31,7 @@
 @Singleton
 public class UserRoleServiceImpl implements UserRoleService {
 	private static final String ROLE_NOT_FOUND_MSG = "Any of role : %s were not found";
+
 	@Inject
 	private UserRoleDao userRoleDao;
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsBillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsBillingService.java
deleted file mode 100644
index aa102cb..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsBillingService.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.aws.AwsBillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.epam.dlab.model.aws.ReportLine;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.List;
-
-@Slf4j
-@Singleton
-public class AwsBillingService extends BillingService<AwsBillingFilter> {
-
-	@Override
-	public String getReportFileName(UserInfo userInfo, AwsBillingFilter filter) {
-		return "aws-billing-report.csv";
-	}
-
-	public String getFirstLine(Document document) throws ParseException {
-
-		SimpleDateFormat from = new SimpleDateFormat("yyyy-MM-dd");
-		SimpleDateFormat to = new SimpleDateFormat("MMM dd, yyyy");
-
-		return String.format("Service base name: %s  " +
-						"Resource tag ID: %s  " +
-						"Available reporting period from: %s to: %s",
-				document.get(AwsBillingDAO.SERVICE_BASE_NAME), document.get(AwsBillingDAO.TAG_RESOURCE_ID),
-				to.format(from.parse((String) document.get(AwsBillingDAO.USAGE_DATE_START))),
-				to.format(from.parse((String) document.get(AwsBillingDAO.USAGE_DATE_END))));
-
-	}
-
-	public List<String> getHeadersList(boolean full) {
-		List<String> headers = new ArrayList<>();
-
-		if (full) {
-			headers.add("USER");
-		}
-
-		headers.add("ENVIRONMENT NAME");
-		headers.add("RESOURCE TYPE");
-		headers.add("SHAPE");
-		headers.add("SERVICE");
-		headers.add("SERVICE CHARGES");
-
-		return headers;
-	}
-
-	public String getLine(boolean full, Document document) {
-		List<String> items = new ArrayList<>();
-
-		if (full) {
-			items.add(getValueOrEmpty(document, ReportLine.FIELD_USER_ID));
-		}
-
-		items.add(getValueOrEmpty(document, ReportLine.FIELD_DLAB_ID));
-		items.add(getValueOrEmpty(document, AwsBillingDAO.DLAB_RESOURCE_TYPE));
-		items.add(getValueOrEmpty(document, AwsBillingDAO.SHAPE).replace(System.lineSeparator(), " "));
-		items.add(getValueOrEmpty(document, ReportLine.FIELD_PRODUCT));
-
-		items.add(getValueOrEmpty(document, ReportLine.FIELD_COST)
-				+ " " + getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE));
-
-		return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-	}
-
-	public String getTotal(boolean full, Document document) {
-		int padding = getHeadersList(full).size() - 1;
-
-		List<String> items = new ArrayList<>();
-		while (padding-- > 0) {
-			items.add("");
-		}
-
-		items.add(String.format("Total: %s %s", getValueOrEmpty(document, AwsBillingDAO.COST_TOTAL),
-				getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE)));
-
-		return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureInfoService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureInfoService.java
deleted file mode 100644
index 43335cc..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureInfoService.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.backendapi.service.impl.InfrastructureInfoServiceBase;
-import com.epam.dlab.dto.aws.edge.EdgeInfoAws;
-import com.google.inject.Singleton;
-
-import java.util.HashMap;
-import java.util.Map;
-
-@Singleton
-public class AwsInfrastructureInfoService extends InfrastructureInfoServiceBase<EdgeInfoAws> {
-
-	@Override
-	protected Map<String, String> getSharedInfo(EdgeInfoAws edgeInfo) {
-		Map<String, String> shared = new HashMap<>();
-		shared.put("edge_node_ip", edgeInfo.getPublicIp());
-		shared.put("user_own_bicket_name", edgeInfo.getUserOwnBucketName());
-		shared.put("shared_bucket_name", edgeInfo.getSharedBucketName());
-		return shared;
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureTemplateService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureTemplateService.java
deleted file mode 100644
index 62a8f3f..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureTemplateService.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.resources.dto.aws.AwsEmrConfiguration;
-import com.epam.dlab.backendapi.service.impl.InfrastructureTemplateServiceBase;
-import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
-import com.epam.dlab.dto.imagemetadata.ComputationalMetadataDTO;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.inject.Inject;
-
-public class AwsInfrastructureTemplateService extends InfrastructureTemplateServiceBase {
-
-	@Inject
-	private SelfServiceApplicationConfiguration configuration;
-
-	@Override
-	protected FullComputationalTemplate getCloudFullComputationalTemplate(ComputationalMetadataDTO metadataDTO) {
-		return new AwsFullComputationalTemplate(metadataDTO,
-				AwsEmrConfiguration.builder()
-						.minEmrInstanceCount(configuration.getMinEmrInstanceCount())
-						.maxEmrInstanceCount(configuration.getMaxEmrInstanceCount())
-						.maxEmrSpotInstanceBidPct(configuration.getMaxEmrSpotInstanceBidPct())
-						.minEmrSpotInstanceBidPct(configuration.getMinEmrSpotInstanceBidPct())
-						.build());
-	}
-
-	private class AwsFullComputationalTemplate extends FullComputationalTemplate {
-		@JsonProperty("limits")
-		private AwsEmrConfiguration awsEmrConfiguration;
-
-		AwsFullComputationalTemplate(ComputationalMetadataDTO metadataDTO,
-									 AwsEmrConfiguration awsEmrConfiguration) {
-			super(metadataDTO);
-			this.awsEmrConfiguration = awsEmrConfiguration;
-		}
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureBillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureBillingService.java
deleted file mode 100644
index 672aa7d..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureBillingService.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.azure.AzureBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.azure.AzureBillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.List;
-
-@Slf4j
-@Singleton
-public class AzureBillingService extends BillingService<AzureBillingFilter> {
-
-    @Inject
-    private BillingDAO billingDAO;
-
-    @Override
-    public String getReportFileName(UserInfo userInfo, AzureBillingFilter filter) {
-        return "azure-billing-report.csv";
-    }
-
-    @Override
-    public String getFirstLine(Document document) throws ParseException {
-        SimpleDateFormat from = new SimpleDateFormat("yyyy-MM-dd");
-        SimpleDateFormat to = new SimpleDateFormat("MMM dd, yyyy");
-
-        return String.format("Service base name: %s  " +
-                        "Available reporting period from: %s to: %s",
-                document.get(BaseBillingDAO.SERVICE_BASE_NAME),
-                to.format(from.parse((String) document.get(MongoKeyWords.USAGE_FROM))),
-                to.format(from.parse((String) document.get(MongoKeyWords.USAGE_TO))));
-    }
-
-    public List<String> getHeadersList(boolean full) {
-        List<String> headers = new ArrayList<>();
-
-        if (full) {
-            headers.add("USER");
-        }
-
-        headers.add("ENVIRONMENT NAME");
-        headers.add("RESOURCE TYPE");
-        headers.add("INSTANCE SIZE");
-        headers.add("CATEGORY");
-        headers.add("SERVICE CHARGES");
-
-        return headers;
-    }
-
-    @Override
-    public String getLine(boolean full, Document document) {
-        List<String> items = new ArrayList<>();
-
-        if (full) {
-            items.add(getValueOrEmpty(document, MongoKeyWords.DLAB_USER));
-        }
-
-        items.add(getValueOrEmpty(document, MongoKeyWords.DLAB_ID));
-        items.add(getValueOrEmpty(document, MongoKeyWords.RESOURCE_TYPE));
-        items.add(getValueOrEmpty(document, AzureBillingDAO.SIZE).replace(System.lineSeparator(), " "));
-        items.add(getValueOrEmpty(document, MongoKeyWords.METER_CATEGORY));
-
-        items.add(getValueOrEmpty(document, MongoKeyWords.COST_STRING)
-                + " " + getValueOrEmpty(document, MongoKeyWords.CURRENCY_CODE));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-
-    @Override
-    public String getTotal(boolean full, Document document) {
-        int padding = getHeadersList(full).size() - 1;
-
-        List<String> items = new ArrayList<>();
-        while (padding-- > 0) {
-            items.add("");
-        }
-
-        items.add(String.format("Total: %s %s", getValueOrEmpty(document, MongoKeyWords.COST_STRING),
-                getValueOrEmpty(document, MongoKeyWords.CURRENCY_CODE)));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureInfoService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureInfoService.java
deleted file mode 100644
index 6338152..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureInfoService.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.backendapi.service.impl.InfrastructureInfoServiceBase;
-import com.epam.dlab.dto.azure.edge.EdgeInfoAzure;
-import com.google.inject.Singleton;
-
-import java.util.HashMap;
-import java.util.Map;
-
-@Singleton
-public class AzureInfrastructureInfoService extends InfrastructureInfoServiceBase<EdgeInfoAzure> {
-
-	@Override
-	protected Map<String, String> getSharedInfo(EdgeInfoAzure edgeInfo) {
-		Map<String, String> shared = new HashMap<>();
-		shared.put("edge_node_ip", edgeInfo.getPublicIp());
-		shared.put("user_container_name", edgeInfo.getUserContainerName());
-		shared.put("shared_container_name", edgeInfo.getSharedContainerName());
-		shared.put("user_storage_account_name", edgeInfo.getUserStorageAccountName());
-		shared.put("shared_storage_account_name", edgeInfo.getSharedStorageAccountName());
-		shared.put("datalake_name", edgeInfo.getDataLakeName());
-		shared.put("datalake_user_directory_name", edgeInfo.getDataLakeDirectoryName());
-		shared.put("datalake_shared_directory_name", edgeInfo.getDataLakeSharedDirectoryName());
-		return shared;
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureTemplateService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureTemplateService.java
deleted file mode 100644
index 595ae2e..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureTemplateService.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.backendapi.service.impl.InfrastructureTemplateServiceBase;
-import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
-import com.epam.dlab.dto.imagemetadata.ComputationalMetadataDTO;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-
-@Singleton
-@Slf4j
-public class AzureInfrastructureTemplateService extends InfrastructureTemplateServiceBase {
-
-	@Override
-	protected FullComputationalTemplate getCloudFullComputationalTemplate(ComputationalMetadataDTO metadataDTO) {
-		log.error("Operation is not supported currently");
-		throw new UnsupportedOperationException("Operation is not supported currently");
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpBillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpBillingService.java
deleted file mode 100644
index 6851f33..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpBillingService.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.gcp;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.gcp.GcpBillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.epam.dlab.model.aws.ReportLine;
-import org.bson.Document;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.List;
-
-public class GcpBillingService extends BillingService<GcpBillingFilter> {
-    @Override
-    public String getFirstLine(Document document) throws ParseException {
-        SimpleDateFormat from = new SimpleDateFormat("yyyy-MM-dd");
-        SimpleDateFormat to = new SimpleDateFormat("MMM dd, yyyy");
-
-        return String.format("Service base name: %s Available reporting period from: %s to: %s",
-                document.get(AwsBillingDAO.SERVICE_BASE_NAME),
-                to.format(from.parse((String) document.get("from"))),
-                to.format(from.parse((String) document.get("to"))));
-    }
-
-    @Override
-    public List<String> getHeadersList(boolean full) {
-        List<String> headers = new ArrayList<>();
-
-        if (full) {
-            headers.add("USER");
-        }
-
-        headers.add("ENVIRONMENT NAME");
-        headers.add("RESOURCE TYPE");
-        headers.add("SHAPE");
-        headers.add("SERVICE");
-        headers.add("SERVICE CHARGES");
-
-        return headers;
-    }
-
-    @Override
-    public String getLine(boolean full, Document document) {
-        List<String> items = new ArrayList<>();
-
-        if (full) {
-            items.add(getValueOrEmpty(document, ReportLine.FIELD_USER_ID));
-        }
-
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_DLAB_ID));
-        items.add(getValueOrEmpty(document, AwsBillingDAO.DLAB_RESOURCE_TYPE));
-        items.add(getValueOrEmpty(document, AwsBillingDAO.SHAPE).replace(System.lineSeparator(), " "));
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_PRODUCT));
-
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_COST)
-                + " " + getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-
-    @Override
-    public String getTotal(boolean full, Document document) {
-        int padding = getHeadersList(full).size() - 1;
-
-        List<String> items = new ArrayList<>();
-        while (padding-- > 0) {
-            items.add("");
-        }
-
-        items.add(String.format("Total: %s %s", getValueOrEmpty(document, AwsBillingDAO.COST_TOTAL),
-                getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE)));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-
-    @Override
-    public String getReportFileName(UserInfo userInfo, GcpBillingFilter filter) {
-        return "gcp-billing-report.csv";
-    }
-}
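
The removed per-cloud billing services assembled their CSV reports by hand; the padding trick in the deleted getTotal() above is easy to miss. Below is a minimal, self-contained sketch of the same technique — left-padding the "Total:" cell with empty columns so it lands under the last header. The class name, separator, and values are illustrative, not DLab's CSVFormatter.

import java.util.ArrayList;
import java.util.List;

public class CsvTotalLineSketch {
    private static final String SEPARATOR = ",";

    static String totalLine(int headerCount, String cost, String currency) {
        List<String> items = new ArrayList<>();
        int padding = headerCount - 1;          // every column except the last stays empty
        while (padding-- > 0) {
            items.add("");
        }
        items.add(String.format("Total: %s %s", cost, currency));
        return String.join(SEPARATOR, items);
    }

    public static void main(String[] args) {
        // With 6 headers this prints: ,,,,,Total: 12.34 USD
        System.out.println(totalLine(6, "12.34", "USD"));
    }
}
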
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureInfoService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureInfoService.java
deleted file mode 100644
index c517486..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureInfoService.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.gcp;
-
-import com.epam.dlab.backendapi.service.impl.InfrastructureInfoServiceBase;
-import com.epam.dlab.dto.gcp.edge.EdgeInfoGcp;
-import com.google.inject.Singleton;
-
-import java.util.HashMap;
-import java.util.Map;
-
-@Singleton
-public class GcpInfrastructureInfoService extends InfrastructureInfoServiceBase<EdgeInfoGcp> {
-
-	@Override
-	protected Map<String, String> getSharedInfo(EdgeInfoGcp edgeInfo) {
-		Map<String, String> shared = new HashMap<>();
-		shared.put("edge_node_ip", edgeInfo.getPublicIp());
-		shared.put("user_own_bucket_name", edgeInfo.getUserOwnBucketName());
-		shared.put("shared_bucket_name", edgeInfo.getSharedBucketName());
-		return shared;
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureTemplateService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureTemplateService.java
deleted file mode 100644
index 314698d..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureTemplateService.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.gcp;
-
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.resources.dto.gcp.GcpDataprocConfiguration;
-import com.epam.dlab.backendapi.service.impl.InfrastructureTemplateServiceBase;
-import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
-import com.epam.dlab.dto.imagemetadata.ComputationalMetadataDTO;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.inject.Inject;
-
-public class GcpInfrastructureTemplateService extends InfrastructureTemplateServiceBase {
-	@Inject
-	private SelfServiceApplicationConfiguration configuration;
-
-	@Override
-	protected FullComputationalTemplate getCloudFullComputationalTemplate(ComputationalMetadataDTO metadataDTO) {
-		return new GcpFullComputationalTemplate(metadataDTO,
-				GcpDataprocConfiguration.builder()
-						.minInstanceCount(configuration.getMinInstanceCount())
-						.maxInstanceCount(configuration.getMaxInstanceCount())
-						.minDataprocPreemptibleInstanceCount(configuration.getMinDataprocPreemptibleCount())
-						.build());
-	}
-
-
-	private class GcpFullComputationalTemplate extends FullComputationalTemplate {
-		@JsonProperty("limits")
-		private GcpDataprocConfiguration gcpDataprocConfiguration;
-
-		GcpFullComputationalTemplate(ComputationalMetadataDTO metadataDTO,
-									 GcpDataprocConfiguration gcpDataprocConfiguration) {
-			super(metadataDTO);
-			this.gcpDataprocConfiguration = gcpDataprocConfiguration;
-		}
-	}
-}
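
For reference, the deleted GCP template service attached Dataproc limits to the serialized template under a "limits" JSON key via @JsonProperty. A rough sketch of that wrapping with Jackson, using stand-in classes rather than the real DTOs:

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class LimitsWrapperSketch {
    static class DataprocLimits {
        public int minInstanceCount = 2;
        public int maxInstanceCount = 10;
    }

    static class TemplateWithLimits {
        public String templateName = "dataproc";
        @JsonProperty("limits")                 // nests the limits block under "limits"
        public DataprocLimits limits = new DataprocLimits();
    }

    public static void main(String[] args) throws Exception {
        // Prints roughly: {"templateName":"dataproc","limits":{"minInstanceCount":2,"maxInstanceCount":10}}
        System.out.println(new ObjectMapper().writeValueAsString(new TemplateWithLimits()));
    }
}
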
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/AccessKeyServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/AccessKeyServiceImpl.java
index 193e28a..11c8ef4 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/AccessKeyServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/AccessKeyServiceImpl.java
@@ -21,23 +21,11 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.annotation.BudgetLimited;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.resources.dto.KeysDTO;
 import com.epam.dlab.backendapi.service.AccessKeyService;
-import com.epam.dlab.backendapi.service.ReuploadKeyService;
-import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.dto.base.keyload.UploadFile;
-import com.epam.dlab.dto.keyload.KeyLoadStatus;
-import com.epam.dlab.dto.keyload.UserKeyDTO;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
-import com.google.inject.name.Named;
 import com.jcraft.jsch.JSch;
 import com.jcraft.jsch.JSchException;
 import com.jcraft.jsch.KeyPair;
@@ -46,90 +34,12 @@
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 
-import static com.epam.dlab.constants.ServiceConsts.PROVISIONING_SERVICE_NAME;
-import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
-import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
-import static com.epam.dlab.rest.contracts.EdgeAPI.EDGE_CREATE;
-
 @Singleton
 @Slf4j
 public class AccessKeyServiceImpl implements AccessKeyService {
-
-	@Inject
-	private KeyDAO keyDAO;
-	@Inject
-	@Named(PROVISIONING_SERVICE_NAME)
-	private RESTService provisioningService;
-	@Inject
-	private RequestBuilder requestBuilder;
-	@Inject
-	private RequestId requestId;
 	@Inject
 	private SelfServiceApplicationConfiguration configuration;
-	@Inject
-	private ReuploadKeyService reuploadKeyService;
 
-	@Override
-	public KeyLoadStatus getUserKeyStatus(String user) {
-		log.debug("Check the status of the user key for {}", user);
-		try {
-			return keyDAO.findKeyStatus(user);
-		} catch (DlabException e) {
-			log.error("Check the status of the user key for {} fails", user, e);
-			return KeyLoadStatus.ERROR;
-		}
-	}
-
-	@BudgetLimited
-	@Override
-	public String uploadKey(UserInfo user, String keyContent, boolean isPrimaryUploading) {
-		log.debug(isPrimaryUploading ? "The key uploading and EDGE node creating for user {} is starting..." :
-				"The key reuploading for user {} is starting...", user);
-		keyDAO.upsertKey(user.getName(), keyContent, isPrimaryUploading);
-		try {
-			return isPrimaryUploading ? createEdge(user, keyContent) : reuploadKeyService.reuploadKey(user,
-					keyContent);
-		} catch (Exception e) {
-			log.error(isPrimaryUploading ? "The key uploading and EDGE node creating for user {} fails" :
-					"The key reuploading for user {} fails", user.getName(), e);
-			keyDAO.deleteKey(user.getName());
-			throw new DlabException(isPrimaryUploading ? "Could not upload the key and create EDGE node: " :
-					"Could not reupload the key. Previous key has been deleted: " + e.getLocalizedMessage(), e);
-		}
-	}
-
-	@BudgetLimited
-	@Override
-	public String recoverEdge(UserInfo userInfo) {
-		log.debug("Recreating edge node for user {}", userInfo.getName());
-		try {
-			String userName = userInfo.getName();
-			EdgeInfo edgeInfo = getEdgeInfo(userName);
-			UserKeyDTO key = keyDAO.fetchKey(userName, KeyLoadStatus.SUCCESS);
-			updateEdgeStatusToCreating(userName, edgeInfo);
-			return createEdge(userInfo, key.getContent());
-		} catch (Exception e) {
-			log.error("Could not create the EDGE node for user {}", userInfo.getName(), e);
-			keyDAO.updateEdgeStatus(userInfo.getName(), FAILED.toString());
-			throw new DlabException("Could not upload the key and create EDGE node: " + e.getLocalizedMessage(), e);
-		}
-	}
-
-	@Override
-	public String generateKey(UserInfo userInfo, boolean createEdge) {
-		log.debug("Generating new key pair for user {}", userInfo.getName());
-		try (ByteArrayOutputStream publicKeyOut = new ByteArrayOutputStream();
-			 ByteArrayOutputStream privateKeyOut = new ByteArrayOutputStream()) {
-			KeyPair pair = KeyPair.genKeyPair(new JSch(), KeyPair.RSA, configuration.getPrivateKeySize());
-			pair.writePublicKey(publicKeyOut, userInfo.getName());
-			pair.writePrivateKey(privateKeyOut);
-			uploadKey(userInfo, new String(publicKeyOut.toByteArray()), createEdge);
-			return new String(privateKeyOut.toByteArray());
-		} catch (JSchException | IOException e) {
-			log.error("Can not generate private/public key pair due to: {}", e.getMessage());
-			throw new DlabException("Can not generate private/public key pair due to: " + e.getMessage(), e);
-		}
-	}
 
 	@Override
 	public KeysDTO generateKeys(UserInfo userInfo) {
@@ -146,33 +56,4 @@
 			throw new DlabException("Can not generate private/public key pair due to: " + e.getMessage(), e);
 		}
 	}
-
-	private EdgeInfo getEdgeInfo(String userName) {
-		EdgeInfo edgeInfo = keyDAO.getEdgeInfo(userName);
-		UserInstanceStatus status = UserInstanceStatus.of(edgeInfo.getEdgeStatus());
-		if (status == null || !status.in(FAILED, TERMINATED)) {
-			log.error("Could not create EDGE node for user {} because the status of instance is {}", userName,
-					status);
-			throw new DlabException("Could not create EDGE node because the status of instance is " + status);
-		}
-		return edgeInfo;
-	}
-
-	private void updateEdgeStatusToCreating(String userName, EdgeInfo edgeInfo) {
-		edgeInfo.setInstanceId(null);
-		edgeInfo.setEdgeStatus(UserInstanceStatus.CREATING.toString());
-		try {
-			keyDAO.updateEdgeInfo(userName, edgeInfo);
-		} catch (DlabException e) {
-			log.error("Could not update the status of EDGE node for user {}", userName, e);
-			throw new DlabException("Could not create EDGE node: " + e.getLocalizedMessage(), e);
-		}
-	}
-
-	private String createEdge(UserInfo user, String keyContent) {
-		UploadFile uploadFile = requestBuilder.newEdgeKeyUpload(user, keyContent);
-		String uuid = provisioningService.post(EDGE_CREATE, user.getAccessToken(), uploadFile, String.class);
-		requestId.put(user.getName(), uuid);
-		return uuid;
-	}
 }
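
After this change AccessKeyServiceImpl keeps only key-pair generation. A minimal sketch of the underlying JSch calls it still relies on, with a hard-coded 2048-bit size standing in for configuration.getPrivateKeySize() and "demo-user" standing in for the user name:

import com.jcraft.jsch.JSch;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.KeyPair;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class KeyPairSketch {
    public static void main(String[] args) throws JSchException, IOException {
        try (ByteArrayOutputStream publicKeyOut = new ByteArrayOutputStream();
             ByteArrayOutputStream privateKeyOut = new ByteArrayOutputStream()) {
            KeyPair pair = KeyPair.genKeyPair(new JSch(), KeyPair.RSA, 2048);
            pair.writePublicKey(publicKeyOut, "demo-user");   // comment field of the public key
            pair.writePrivateKey(privateKeyOut);
            System.out.println(publicKeyOut);                 // ssh-rsa AAAA... demo-user
        }
    }
}
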
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImpl.java
new file mode 100644
index 0000000..8eae49b
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImpl.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.service.impl;
+
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
+import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
+import com.epam.dlab.backendapi.domain.BillingReport;
+import com.epam.dlab.backendapi.domain.BillingReportLine;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
+import com.epam.dlab.backendapi.resources.dto.BillingFilter;
+import com.epam.dlab.backendapi.roles.RoleType;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.backendapi.service.BillingService;
+import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.backendapi.util.BillingUtils;
+import com.epam.dlab.cloud.CloudProvider;
+import com.epam.dlab.constants.ServiceConsts;
+import com.epam.dlab.dto.UserInstanceStatus;
+import com.epam.dlab.dto.billing.BillingData;
+import com.epam.dlab.dto.billing.BillingResourceType;
+import com.epam.dlab.exceptions.DlabException;
+import com.epam.dlab.rest.client.RESTService;
+import com.google.common.collect.Lists;
+import com.google.inject.Inject;
+import com.google.inject.name.Named;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.collections4.CollectionUtils;
+import org.apache.http.client.utils.URIBuilder;
+
+import javax.ws.rs.core.GenericType;
+import java.math.BigDecimal;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.time.LocalDate;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+@Slf4j
+public class BillingServiceImpl implements BillingService {
+    private static final String BILLING_PATH = "/api/billing";
+    private static final String USAGE_DATE_FORMAT = "yyyy-MM";
+
+    private final ProjectService projectService;
+    private final EndpointService endpointService;
+    private final ExploratoryService exploratoryService;
+    private final SelfServiceApplicationConfiguration configuration;
+    private final RESTService provisioningService;
+    private final ImageExploratoryDao imageExploratoryDao;
+    private final BillingDAO billingDAO;
+    private final String sbn;
+
+    @Inject
+    public BillingServiceImpl(ProjectService projectService, EndpointService endpointService,
+                              ExploratoryService exploratoryService, SelfServiceApplicationConfiguration configuration,
+                              @Named(ServiceConsts.BILLING_SERVICE_NAME) RESTService provisioningService, ImageExploratoryDao imageExploratoryDao,
+                              BillingDAO billingDAO) {
+        this.projectService = projectService;
+        this.endpointService = endpointService;
+        this.exploratoryService = exploratoryService;
+        this.configuration = configuration;
+        this.provisioningService = provisioningService;
+        this.imageExploratoryDao = imageExploratoryDao;
+        this.billingDAO = billingDAO;
+        sbn = configuration.getServiceBaseName();
+    }
+
+    @Override
+    public BillingReport getBillingReport(UserInfo user, BillingFilter filter) {
+        setUserFilter(user, filter);
+        List<BillingReportLine> billingReportLines = billingDAO.aggregateBillingData(filter)
+                .stream()
+                .peek(this::appendStatuses)
+                .filter(bd -> CollectionUtils.isEmpty(filter.getStatuses()) || filter.getStatuses().contains(bd.getStatus()))
+                .collect(Collectors.toList());
+        final LocalDate min = billingReportLines.stream().min(Comparator.comparing(BillingReportLine::getUsageDateFrom)).map(BillingReportLine::getUsageDateFrom).orElse(null);
+        final LocalDate max = billingReportLines.stream().max(Comparator.comparing(BillingReportLine::getUsageDateTo)).map(BillingReportLine::getUsageDateTo).orElse(null);
+        final double sum = billingReportLines.stream().mapToDouble(BillingReportLine::getCost).sum();
+        final String currency = billingReportLines.stream().map(BillingReportLine::getCurrency).distinct().count() == 1 ? billingReportLines.get(0).getCurrency() : null;
+        return BillingReport.builder()
+                .name("Billing report")
+                .sbn(sbn)
+                .reportLines(billingReportLines)
+                .usageDateFrom(min)
+                .usageDateTo(max)
+                .totalCost(new BigDecimal(sum).setScale(2, BigDecimal.ROUND_HALF_UP).doubleValue())
+                .currency(currency)
+                .isFull(isFullReport(user))
+                .build();
+    }
+
+    @Override
+    public String downloadReport(UserInfo user, BillingFilter filter) {
+        boolean isFull = isFullReport(user);
+        BillingReport report = getBillingReport(user, filter);
+        StringBuilder builder = new StringBuilder(BillingUtils.getFirstLine(report.getSbn(), report.getUsageDateFrom(), report.getUsageDateTo()));
+        builder.append(BillingUtils.getHeader(isFull));
+        try {
+            report.getReportLines().forEach(r -> builder.append(BillingUtils.printLine(r, isFull)));
+            builder.append(BillingUtils.getTotal(report.getTotalCost(), report.getCurrency()));
+            return builder.toString();
+        } catch (Exception e) {
+            log.error("Cannot write billing data ", e);
+            throw new DlabException("Cannot write billing file ", e);
+        }
+    }
+
+    public BillingReport getExploratoryBillingData(String project, String endpoint, String exploratoryName, List<String> compNames) {
+        List<String> resourceNames = new ArrayList<>(compNames);
+        resourceNames.add(exploratoryName);
+        List<BillingReportLine> billingData = billingDAO.findBillingData(project, endpoint, resourceNames)
+                .stream()
+                .peek(bd -> bd.setCost(BigDecimal.valueOf(bd.getCost()).setScale(2, BigDecimal.ROUND_HALF_UP).doubleValue()))
+                .collect(Collectors.toList());
+        final double sum = billingData.stream().mapToDouble(BillingReportLine::getCost).sum();
+        final String currency = billingData.stream().map(BillingReportLine::getCurrency).distinct().count() == 1 ? billingData.get(0).getCurrency() : null;
+        return BillingReport.builder()
+                .name(exploratoryName)
+                .reportLines(billingData)
+                .totalCost(new BigDecimal(sum).setScale(2, BigDecimal.ROUND_HALF_UP).doubleValue())
+                .currency(currency)
+                .build();
+    }
+
+    public void updateRemoteBillingData(UserInfo userInfo) {
+        List<EndpointDTO> endpoints = endpointService.getEndpoints();
+        if (CollectionUtils.isEmpty(endpoints)) {
+            log.error("Cannot update billing info. There are no endpoints");
+            throw new DlabException("Cannot update billing info. There are no endpoints");
+        }
+
+        Map<EndpointDTO, List<BillingData>> billingDataMap = endpoints
+                .stream()
+                .collect(Collectors.toMap(e -> e, e -> getBillingData(userInfo, e)));
+
+        billingDataMap.forEach((endpointDTO, billingData) -> {
+            log.info("Updating billing information for endpoint {}. Billing data {}", endpointDTO.getName(), billingData);
+            try {
+                updateBillingData(endpointDTO, billingData);
+            } catch (Exception e) {
+                log.error("Something went wrong while trying to update billing for {}. {}", endpointDTO.getName(), e.getMessage());
+            }
+        });
+    }
+
+    private Map<String, BillingReportLine> getBillableResources() {
+        Set<ProjectDTO> projects = new HashSet<>(projectService.getProjects());
+        final Stream<BillingReportLine> ssnBillingDataStream = BillingUtils.ssnBillingDataStream(sbn);
+        final Stream<BillingReportLine> billableEdges = projects
+                .stream()
+                .collect(Collectors.toMap(ProjectDTO::getName, ProjectDTO::getEndpoints))
+                .entrySet()
+                .stream()
+                .flatMap(e -> projectEdges(sbn, e.getKey(), e.getValue()));
+        final Stream<BillingReportLine> billableSharedEndpoints = endpointService.getEndpoints()
+                .stream()
+                .flatMap(endpoint -> BillingUtils.sharedEndpointBillingDataStream(endpoint.getName(), sbn));
+        final Stream<BillingReportLine> billableUserInstances = exploratoryService.findAll(projects)
+                .stream()
+                .filter(userInstance -> Objects.nonNull(userInstance.getExploratoryId()))
+                .flatMap(ui -> BillingUtils.exploratoryBillingDataStream(ui, configuration.getMaxSparkInstanceCount()));
+        final Stream<BillingReportLine> customImages = projects
+                .stream()
+                .map(p -> imageExploratoryDao.getImagesForProject(p.getName()))
+                .flatMap(Collection::stream)
+                .flatMap(i -> BillingUtils.customImageBillingDataStream(i, sbn));
+
+        final Map<String, BillingReportLine> billableResources = Stream.of(ssnBillingDataStream, billableEdges, billableSharedEndpoints, billableUserInstances, customImages)
+                .flatMap(s -> s)
+                .collect(Collectors.toMap(BillingReportLine::getDlabId, b -> b));
+        log.debug("Billable resources are: {}", billableResources);
+
+        return billableResources;
+    }
+
+    private Stream<BillingReportLine> projectEdges(String serviceBaseName, String projectName, List<ProjectEndpointDTO> endpoints) {
+        return endpoints
+                .stream()
+                .flatMap(endpoint -> BillingUtils.edgeBillingDataStream(projectName, serviceBaseName, endpoint.getName()));
+    }
+
+    private void updateBillingData(EndpointDTO endpointDTO, List<BillingData> billingData) {
+        final String endpointName = endpointDTO.getName();
+        final CloudProvider cloudProvider = endpointDTO.getCloudProvider();
+        final Map<String, BillingReportLine> billableResources = getBillableResources();
+        final Stream<BillingReportLine> billingReportLineStream = billingData
+                .stream()
+                .peek(bd -> bd.setApplication(endpointName))
+                .map(bd -> toBillingReport(bd, getOrDefault(billableResources, bd.getTag())));
+
+        if (cloudProvider == CloudProvider.GCP) {
+            final Map<String, List<BillingReportLine>> gcpBillingData = billingReportLineStream
+                    .collect(Collectors.groupingBy(bd -> bd.getUsageDate().substring(0, USAGE_DATE_FORMAT.length())));
+            updateGcpBillingData(endpointName, gcpBillingData);
+        } else if (cloudProvider == CloudProvider.AWS) {
+            final Map<String, List<BillingReportLine>> awsBillingData = billingReportLineStream
+                    .collect(Collectors.groupingBy(BillingReportLine::getUsageDate));
+            updateAwsBillingData(endpointName, awsBillingData);
+        } else if (cloudProvider == CloudProvider.AZURE) {
+            final List<BillingReportLine> billingReportLines = billingReportLineStream
+                    .collect(Collectors.toList());
+            updateAzureBillingData(billingReportLines);
+        }
+    }
+
+    private BillingReportLine getOrDefault(Map<String, BillingReportLine> billableResources, String tag) {
+        return billableResources.getOrDefault(tag, BillingReportLine.builder().dlabId(tag).build());
+    }
+
+    private void updateGcpBillingData(String endpointName, Map<String, List<BillingReportLine>> billingData) {
+        billingData.forEach((usageDate, billingReportLines) -> {
+            billingDAO.deleteByUsageDateRegex(endpointName, usageDate);
+            billingDAO.save(billingReportLines);
+        });
+    }
+
+    private void updateAwsBillingData(String endpointName, Map<String, List<BillingReportLine>> billingData) {
+        billingData.forEach((usageDate, billingReportLines) -> {
+            billingDAO.deleteByUsageDate(endpointName, usageDate);
+            billingDAO.save(billingReportLines);
+        });
+    }
+
+    private void updateAzureBillingData(List<BillingReportLine> billingReportLines) {
+        billingDAO.save(billingReportLines);
+    }
+
+    private List<BillingData> getBillingData(UserInfo userInfo, EndpointDTO e) {
+        try {
+            return provisioningService.get(getBillingUrl(e.getUrl(), BILLING_PATH), userInfo.getAccessToken(),
+                    new GenericType<List<BillingData>>() {
+                    });
+        } catch (Exception ex) {
+            log.error("Cannot retrieve billing information for {}. {}", e.getName(), ex.getMessage());
+            return Collections.emptyList();
+        }
+    }
+
+    private String getBillingUrl(String endpointUrl, String path) {
+        URI uri;
+        try {
+            uri = new URI(endpointUrl);
+        } catch (URISyntaxException e) {
+            log.error("Wrong URI syntax {}", e.getMessage(), e);
+            throw new DlabException("Wrong URI syntax");
+        }
+        return new URIBuilder()
+                .setScheme(uri.getScheme())
+                .setHost(uri.getHost())
+                .setPort(8088)
+                .setPath(path)
+                .toString();
+    }
+
+    private void appendStatuses(BillingReportLine br) {
+        BillingResourceType resourceType = br.getResourceType();
+        if (BillingResourceType.EDGE == resourceType) {
+            projectService.get(br.getProject()).getEndpoints()
+                    .stream()
+                    .filter(e -> e.getName().equals(br.getResourceName()))
+                    .findAny()
+                    .ifPresent(e -> br.setStatus(e.getStatus()));
+        } else if (BillingResourceType.EXPLORATORY == resourceType) {
+            exploratoryService.getUserInstance(br.getUser(), br.getProject(), br.getResourceName())
+                    .ifPresent(ui -> br.setStatus(UserInstanceStatus.of(ui.getStatus())));
+        } else if (BillingResourceType.COMPUTATIONAL == resourceType) {
+            exploratoryService.getUserInstance(br.getUser(), br.getProject(), br.getExploratoryName(), true)
+                    .flatMap(ui -> ui.getResources()
+                            .stream()
+                            .filter(cr -> cr.getComputationalName().equals(br.getResourceName()))
+                            .findAny())
+                    .ifPresent(cr -> br.setStatus(UserInstanceStatus.of(cr.getStatus())));
+        }
+    }
+
+    private boolean isFullReport(UserInfo userInfo) {
+        return UserRoles.checkAccess(userInfo, RoleType.PAGE, "/api/infrastructure_provision/billing",
+                userInfo.getRoles());
+    }
+
+    private void setUserFilter(UserInfo userInfo, BillingFilter filter) {
+        if (!isFullReport(userInfo)) {
+            filter.setUsers(Lists.newArrayList(userInfo.getName()));
+        }
+    }
+
+    private BillingReportLine toBillingReport(BillingData billingData, BillingReportLine billingReportLine) {
+        return BillingReportLine.builder()
+                .application(billingData.getApplication())
+                .cost(billingData.getCost())
+                .currency(billingData.getCurrency())
+                .product(billingData.getProduct())
+                .project(billingReportLine.getProject())
+                .endpoint(billingReportLine.getEndpoint())
+                .usageDateFrom(billingData.getUsageDateFrom())
+                .usageDateTo(billingData.getUsageDateTo())
+                .usageDate(billingData.getUsageDate())
+                .usageType(billingData.getUsageType())
+                .user(billingReportLine.getUser())
+                .dlabId(billingData.getTag())
+                .resourceType(billingReportLine.getResourceType())
+                .resourceName(billingReportLine.getResourceName())
+                .shape(billingReportLine.getShape())
+                .exploratoryName(billingReportLine.getExploratoryName())
+                .build();
+    }
+}
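
The new BillingServiceImpl upserts billing rows differently per cloud: GCP data is replaced month-by-month (grouped by a yyyy-MM prefix of the usage date), AWS day-by-day, and Azure rows are saved as-is. An illustrative sketch of the GCP-style grouping; the DAO calls in the comments are hypothetical stand-ins for BillingDAO:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class BillingUpsertSketch {
    public static void main(String[] args) {
        List<String> usageDates = List.of("2020-03-01", "2020-03-02", "2020-04-01");

        // GCP-style grouping: usage dates are bucketed by their yyyy-MM prefix,
        // so a whole month is deleted and re-saved together per bucket.
        Map<String, List<String>> byMonth = usageDates.stream()
                .collect(Collectors.groupingBy(d -> d.substring(0, "yyyy-MM".length())));

        byMonth.forEach((month, dates) -> {
            // billingDAO.deleteByUsageDateRegex(endpointName, month);  // hypothetical
            // billingDAO.save(linesFor(dates));                        // hypothetical
            System.out.println(month + " -> " + dates);
        });
    }
}
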
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
index 5133485..722ee4d 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
@@ -25,11 +25,16 @@
 import com.epam.dlab.backendapi.annotation.Project;
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.resources.dto.ComputationalCreateFormDTO;
+import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
 import com.epam.dlab.backendapi.service.ComputationalService;
 import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.InfrastructureTemplateService;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.TagService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
 import com.epam.dlab.constants.ServiceConsts;
@@ -38,7 +43,12 @@
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
-import com.epam.dlab.dto.computational.*;
+import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStatusDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.SparkStandaloneClusterResource;
+import com.epam.dlab.dto.computational.UserComputationalResource;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.epam.dlab.rest.client.RESTService;
@@ -48,12 +58,19 @@
 import com.google.inject.name.Named;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Collection;
 import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.stream.Collectors;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.RECONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
 import static com.epam.dlab.dto.base.DataEngineType.CLOUD_SERVICE;
 import static com.epam.dlab.dto.base.DataEngineType.SPARK_STANDALONE;
 import static com.epam.dlab.rest.contracts.ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC;
@@ -75,48 +92,78 @@
 		DATA_ENGINE_TYPE_TERMINATE_URLS.put(CLOUD_SERVICE, ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC);
 	}
 
-	@Inject
-	private ExploratoryDAO exploratoryDAO;
+	private final ProjectService projectService;
+	private final ExploratoryDAO exploratoryDAO;
+	private final ComputationalDAO computationalDAO;
+	private final RESTService provisioningService;
+	private final RequestBuilder requestBuilder;
+	private final RequestId requestId;
+	private final TagService tagService;
+	private final EndpointService endpointService;
+	private final InfrastructureTemplateService templateService;
 
 	@Inject
-	private ComputationalDAO computationalDAO;
+	public ComputationalServiceImpl(ProjectService projectService, ExploratoryDAO exploratoryDAO, ComputationalDAO computationalDAO,
+									@Named(ServiceConsts.PROVISIONING_SERVICE_NAME) RESTService provisioningService,
+									RequestBuilder requestBuilder, RequestId requestId, TagService tagService,
+									EndpointService endpointService, InfrastructureTemplateService templateService) {
+		this.projectService = projectService;
+		this.exploratoryDAO = exploratoryDAO;
+		this.computationalDAO = computationalDAO;
+		this.provisioningService = provisioningService;
+		this.requestBuilder = requestBuilder;
+		this.requestId = requestId;
+		this.tagService = tagService;
+		this.endpointService = endpointService;
+		this.templateService = templateService;
+	}
 
-	@Inject
-	@Named(ServiceConsts.PROVISIONING_SERVICE_NAME)
-	private RESTService provisioningService;
-	@Inject
-	private RequestBuilder requestBuilder;
-	@Inject
-	private RequestId requestId;
-	@Inject
-	private TagService tagService;
-	@Inject
-	private EndpointService endpointService;
 
+	@Override
+	public ComputationalTemplatesDTO getComputationalNamesAndTemplates(UserInfo user, String project, String endpoint) {
+		List<FullComputationalTemplate> computationalTemplates = templateService.getComputationalTemplates(user, project, endpoint);
+		List<UserInstanceDTO> userInstances = exploratoryDAO.fetchExploratoryFieldsForProjectWithComp(project);
+
+		List<String> projectComputations = userInstances
+				.stream()
+				.map(UserInstanceDTO::getResources)
+				.flatMap(Collection::stream)
+				.map(UserComputationalResource::getComputationalName)
+				.collect(Collectors.toList());
+		List<String> userComputations = userInstances
+				.stream()
+				.filter(instance -> instance.getUser().equalsIgnoreCase(user.getName()))
+				.map(UserInstanceDTO::getResources)
+				.flatMap(Collection::stream)
+				.map(UserComputationalResource::getComputationalName)
+				.collect(Collectors.toList());
+
+		return new ComputationalTemplatesDTO(computationalTemplates, userComputations, projectComputations);
+	}
 
 	@BudgetLimited
 	@Override
-	public boolean createSparkCluster(UserInfo userInfo, SparkStandaloneClusterCreateForm form,
-									  @Project String project) {
+	public boolean createSparkCluster(UserInfo userInfo, SparkStandaloneClusterCreateForm form, @Project String project) {
 
-
+		final ProjectDTO projectDTO = projectService.get(project);
 		final UserInstanceDTO instance =
-				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), form.getNotebookName());
+				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, form.getNotebookName());
 		final SparkStandaloneClusterResource compResource = createInitialComputationalResource(form);
 		compResource.setTags(tagService.getResourceTags(userInfo, instance.getEndpoint(), project,
 				form.getCustomTag()));
-		if (computationalDAO.addComputational(userInfo.getName(), form.getNotebookName(), compResource)) {
+		if (computationalDAO.addComputational(userInfo.getName(), form.getNotebookName(), project, compResource)) {
 			try {
-				ComputationalBase<?> dto = requestBuilder.newComputationalCreate(userInfo, instance, form);
+				EndpointDTO endpointDTO = endpointService.get(instance.getEndpoint());
+				ComputationalBase<?> dto = requestBuilder.newComputationalCreate(userInfo, projectDTO, instance, form, endpointDTO);
 
 				String uuid =
-						provisioningService.post(endpointService.get(instance.getEndpoint()).getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK,
+						provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK,
 								userInfo.getAccessToken(), dto, String.class);
 				requestId.put(userInfo.getName(), uuid);
 				return true;
 			} catch (RuntimeException e) {
 				try {
-					updateComputationalStatus(userInfo.getName(), form.getNotebookName(), form.getName(), FAILED);
+					updateComputationalStatus(userInfo.getName(), project, form.getNotebookName(), form.getName(), FAILED);
 				} catch (DlabException d) {
 					log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, form.getName(), userInfo.getName(), d);
 				}
@@ -130,29 +177,30 @@
 	}
 
 	@Override
-	public void terminateComputational(UserInfo userInfo, String exploratoryName, String computationalName) {
+	public void terminateComputational(UserInfo userInfo, String project, String exploratoryName, String computationalName) {
 		try {
 
-			updateComputationalStatus(userInfo.getName(), exploratoryName, computationalName, TERMINATING);
+			updateComputationalStatus(userInfo.getName(), project, exploratoryName, computationalName, TERMINATING);
 
-			final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+			final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project,
 					exploratoryName);
-			UserComputationalResource compResource = computationalDAO.fetchComputationalFields(userInfo
-					.getName(), exploratoryName, computationalName);
+			UserComputationalResource compResource = computationalDAO.fetchComputationalFields(userInfo.getName(), project,
+					exploratoryName, computationalName);
 
 			final DataEngineType dataEngineType = compResource.getDataEngineType();
-			ComputationalTerminateDTO dto = requestBuilder.newComputationalTerminate(userInfo, userInstanceDTO, compResource);
+			EndpointDTO endpointDTO = endpointService.get(userInstanceDTO.getEndpoint());
+			ComputationalTerminateDTO dto = requestBuilder.newComputationalTerminate(userInfo, userInstanceDTO, compResource, endpointDTO);
 
 			final String provisioningUrl = Optional.ofNullable(DATA_ENGINE_TYPE_TERMINATE_URLS.get(dataEngineType))
 					.orElseThrow(UnsupportedOperationException::new);
 			String uuid =
-					provisioningService.post(endpointService.get(userInstanceDTO.getEndpoint()).getUrl() + provisioningUrl,
+					provisioningService.post(endpointDTO.getUrl() + provisioningUrl,
 							userInfo.getAccessToken(), dto, String.class);
 			requestId.put(userInfo.getName(), uuid);
 		} catch (RuntimeException re) {
 
 			try {
-				updateComputationalStatus(userInfo.getName(), exploratoryName, computationalName, FAILED);
+				updateComputationalStatus(userInfo.getName(), project, exploratoryName, computationalName, FAILED);
 			} catch (DlabException e) {
 				log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, computationalName, userInfo.getName(), e);
 			}
@@ -166,25 +214,29 @@
 	public boolean createDataEngineService(UserInfo userInfo, ComputationalCreateFormDTO formDTO,
 										   UserComputationalResource computationalResource, @Project String project) {
 
-		final UserInstanceDTO instance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO
+		final ProjectDTO projectDTO = projectService.get(project);
+		final UserInstanceDTO instance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, formDTO
 				.getNotebookName());
 		final Map<String, String> tags = tagService.getResourceTags(userInfo, instance.getEndpoint(), project,
 				formDTO.getCustomTag());
 		computationalResource.setTags(tags);
-		boolean isAdded = computationalDAO.addComputational(userInfo.getName(), formDTO.getNotebookName(),
+		boolean isAdded = computationalDAO.addComputational(userInfo.getName(), formDTO.getNotebookName(), project,
 				computationalResource);
 
 		if (isAdded) {
 			try {
+				EndpointDTO endpointDTO = endpointService.get(instance.getEndpoint());
 				String uuid =
-						provisioningService.post(endpointService.get(instance.getEndpoint()).getUrl() + COMPUTATIONAL_CREATE_CLOUD_SPECIFIC, userInfo.getAccessToken(),
-								requestBuilder.newComputationalCreate(userInfo, instance, formDTO), String.class);
+						provisioningService.post(endpointDTO.getUrl() + COMPUTATIONAL_CREATE_CLOUD_SPECIFIC,
+								userInfo.getAccessToken(),
+								requestBuilder.newComputationalCreate(userInfo, projectDTO, instance, formDTO, endpointDTO),
+								String.class);
 				requestId.put(userInfo.getName(), uuid);
 				return true;
 			} catch (Exception t) {
 				try {
-					updateComputationalStatus(userInfo.getName(), formDTO.getNotebookName(), formDTO.getName(),
-							FAILED);
+					updateComputationalStatus(userInfo.getName(), project, formDTO.getNotebookName(),
+							formDTO.getName(), FAILED);
 				} catch (DlabException e) {
 					log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, formDTO.getName(), userInfo.getName(), e);
 				}
@@ -198,16 +250,18 @@
 	}
 
 	@Override
-	public void stopSparkCluster(UserInfo userInfo, String expName, String compName) {
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), expName, true);
+	public void stopSparkCluster(UserInfo userInfo, String project, String expName, String compName) {
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, expName, true);
 		final UserInstanceStatus requiredStatus = UserInstanceStatus.RUNNING;
 		if (computationalWithStatusResourceExist(compName, userInstance, requiredStatus)) {
 			log.debug("{} spark cluster {} for userInstance {}", STOPPING.toString(), compName, expName);
-			updateComputationalStatus(userInfo.getName(), expName, compName, STOPPING);
+			updateComputationalStatus(userInfo.getName(), project, expName, compName, STOPPING);
+			EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 			final String uuid =
-					provisioningService.post(endpointService.get(userInstance.getEndpoint()).getUrl() + ComputationalAPI.COMPUTATIONAL_STOP_SPARK,
+					provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_STOP_SPARK,
 							userInfo.getAccessToken(),
-							requestBuilder.newComputationalStop(userInfo, userInstance, compName), String.class);
+							requestBuilder.newComputationalStop(userInfo, userInstance, compName, endpointDTO),
+							String.class);
 			requestId.put(userInfo.getName(), uuid);
 		} else {
 			throw new IllegalStateException(String.format(DATAENGINE_NOT_PRESENT_FORMAT,
@@ -220,15 +274,17 @@
 	@Override
 	public void startSparkCluster(UserInfo userInfo, String expName, String compName, @Project String project) {
 		final UserInstanceDTO userInstance =
-				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), expName, true);
+				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, expName, true);
 		final UserInstanceStatus requiredStatus = UserInstanceStatus.STOPPED;
 		if (computationalWithStatusResourceExist(compName, userInstance, requiredStatus)) {
 			log.debug("{} spark cluster {} for userInstance {}", STARTING.toString(), compName, expName);
-			updateComputationalStatus(userInfo.getName(), expName, compName, STARTING);
+			updateComputationalStatus(userInfo.getName(), project, expName, compName, STARTING);
+			EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 			final String uuid =
-					provisioningService.post(endpointService.get(userInstance.getEndpoint()).getUrl() + ComputationalAPI.COMPUTATIONAL_START_SPARK,
-							userInfo.getAccessToken(), requestBuilder.newComputationalStart(userInfo, userInstance,
-									compName), String.class);
+					provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_START_SPARK,
+							userInfo.getAccessToken(),
+							requestBuilder.newComputationalStart(userInfo, userInstance, compName, endpointDTO),
+							String.class);
 			requestId.put(userInfo.getName(), uuid);
 		} else {
 			throw new IllegalStateException(String.format(DATAENGINE_NOT_PRESENT_FORMAT,
@@ -237,12 +293,12 @@
 	}
 
 	@Override
-	public void updateSparkClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName,
+	public void updateSparkClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName,
 										 List<ClusterConfig> config) {
 		final String userName = userInfo.getName();
 		final String token = userInfo.getAccessToken();
 		final UserInstanceDTO userInstanceDTO = exploratoryDAO
-				.fetchExploratoryFields(userName, exploratoryName, true);
+				.fetchExploratoryFields(userName, project, exploratoryName, true);
 		final UserComputationalResource compResource = userInstanceDTO
 				.getResources()
 				.stream()
@@ -250,12 +306,14 @@
 				.findAny()
 				.orElseThrow(() -> new ResourceNotFoundException(String.format(RUNNING_COMP_RES_NOT_FOUND,
 						computationalName, exploratoryName)));
+		EndpointDTO endpointDTO = endpointService.get(userInstanceDTO.getEndpoint());
 		final ComputationalClusterConfigDTO clusterConfigDto = requestBuilder.newClusterConfigUpdate(userInfo,
-				userInstanceDTO, compResource, config);
+				userInstanceDTO, compResource, config, endpointDTO);
 		final String uuid =
-				provisioningService.post(endpointService.get(userInstanceDTO.getEndpoint()).getUrl() + ComputationalAPI.COMPUTATIONAL_RECONFIGURE_SPARK, token,
-						clusterConfigDto, String.class);
+				provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_RECONFIGURE_SPARK,
+						token, clusterConfigDto, String.class);
 		computationalDAO.updateComputationalFields(new ComputationalStatusDTO()
+				.withProject(userInstanceDTO.getProject())
 				.withComputationalName(computationalName)
 				.withExploratoryName(exploratoryName)
 				.withConfig(config)
@@ -266,37 +324,19 @@
 	}
 
 	/**
-	 * Updates parameter 'reuploadKeyRequired' for corresponding user's computational resources with allowable statuses
-	 * which are affiliated with exploratories with theirs allowable statuses.
-	 *
-	 * @param user                  user.
-	 * @param exploratoryStatuses   allowable exploratories' statuses.
-	 * @param computationalTypes    type list of computational resource.
-	 * @param reuploadKeyRequired   true/false.
-	 * @param computationalStatuses allowable statuses for computational resources.
-	 */
-	@Override
-	public void updateComputationalsReuploadKeyFlag(String user, List<UserInstanceStatus> exploratoryStatuses,
-													List<DataEngineType> computationalTypes,
-													boolean reuploadKeyRequired,
-													UserInstanceStatus... computationalStatuses) {
-		computationalDAO.updateReuploadKeyFlagForComputationalResources(user, exploratoryStatuses, computationalTypes,
-				reuploadKeyRequired, computationalStatuses);
-	}
-
-	/**
 	 * Returns computational resource's data by name for user's exploratory.
 	 *
 	 * @param user              user.
+	 * @param project           name of project.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @return corresponding computational resource's data or empty data if resource doesn't exist.
 	 */
 	@Override
-	public Optional<UserComputationalResource> getComputationalResource(String user, String exploratoryName,
+	public Optional<UserComputationalResource> getComputationalResource(String user, String project, String exploratoryName,
 																		String computationalName) {
 		try {
-			return Optional.of(computationalDAO.fetchComputationalFields(user, exploratoryName, computationalName));
+			return Optional.of(computationalDAO.fetchComputationalFields(user, project, exploratoryName, computationalName));
 		} catch (DlabException e) {
 			log.warn("Computational resource {} affiliated with exploratory {} for user {} not found.",
 					computationalName, exploratoryName, user);
@@ -305,22 +345,24 @@
 	}
 
 	@Override
-	public List<ClusterConfig> getClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName) {
-		return computationalDAO.getClusterConfig(userInfo.getName(), exploratoryName, computationalName);
+	public List<ClusterConfig> getClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName) {
+		return computationalDAO.getClusterConfig(userInfo.getName(), project, exploratoryName, computationalName);
 	}
 
 	/**
 	 * Updates the status of computational resource in database.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @param status            status
 	 */
-	private void updateComputationalStatus(String user, String exploratoryName, String computationalName,
+	private void updateComputationalStatus(String user, String project, String exploratoryName, String computationalName,
 										   UserInstanceStatus status) {
 		ComputationalStatusDTO computationalStatus = new ComputationalStatusDTO()
 				.withUser(user)
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withComputationalName(computationalName)
 				.withStatus(status);
@@ -354,5 +396,4 @@
 				compResource.getDataEngineType() == SPARK_STANDALONE &&
 				compResource.getComputationalName().equals(computationalName);
 	}
-
 }
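
ComputationalServiceImpl also switches from Guice field injection to constructor injection, which makes dependencies final, explicit to callers, and the class constructible with plain `new` in unit tests. A minimal contrast using placeholder types, not DLab classes:

import com.google.inject.Inject;

class Dao {}

// Before: field injection — dependencies are mutable and hidden from callers.
class FieldInjectedService {
    @Inject
    private Dao dao;
}

// After: constructor injection — dependencies are final and visible in the signature.
class ConstructorInjectedService {
    private final Dao dao;

    @Inject
    ConstructorInjectedService(Dao dao) {
        this.dao = dao;
    }
}
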
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EdgeServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EdgeServiceImpl.java
deleted file mode 100644
index e62b52f..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EdgeServiceImpl.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.impl;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.annotation.BudgetLimited;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.dao.ProjectDAO;
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.service.EdgeService;
-import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.constants.ServiceConsts;
-import com.epam.dlab.dto.ResourceSysBaseDTO;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.google.inject.name.Named;
-import lombok.extern.slf4j.Slf4j;
-
-import static com.epam.dlab.dto.UserInstanceStatus.*;
-import static com.epam.dlab.rest.contracts.EdgeAPI.*;
-
-@Singleton
-@Slf4j
-public class EdgeServiceImpl implements EdgeService {
-
-	@Inject
-	private KeyDAO keyDAO;
-	@Inject
-	private ProjectDAO projectDAO;
-
-	@Inject
-	@Named(ServiceConsts.PROVISIONING_SERVICE_NAME)
-	private RESTService provisioningService;
-
-	@Inject
-	private RequestBuilder requestBuilder;
-
-	@Inject
-	private RequestId requestId;
-
-
-	@BudgetLimited
-	@Override
-	public String start(UserInfo userInfo) {
-		log.debug("Starting EDGE node for user {}", userInfo.getName());
-		UserInstanceStatus status = UserInstanceStatus.of(keyDAO.getEdgeStatus(userInfo.getName()));
-		if (status == null || !status.in(STOPPED)) {
-			log.error("Could not start EDGE node for user {} because the status of instance is {}",
-					userInfo.getName(), status);
-			throw new DlabException("Could not start EDGE node because the status of instance is " + status);
-		}
-		try {
-			return action(userInfo, EDGE_START, STARTING);
-		} catch (DlabException e) {
-			log.error("Could not start EDGE node for user {}", userInfo.getName(), e);
-			throw new DlabException("Could not start EDGE node: " + e.getLocalizedMessage(), e);
-		}
-	}
-
-	@Override
-	public String stop(UserInfo userInfo) {
-		log.debug("Stopping EDGE node for user {}", userInfo.getName());
-		UserInstanceStatus status = UserInstanceStatus.of(keyDAO.getEdgeStatus(userInfo.getName()));
-		if (status == null || !status.in(RUNNING)) {
-			log.error("Could not stop EDGE node for user {} because the status of instance is {}",
-					userInfo.getName(), status);
-			throw new DlabException("Could not stop EDGE node because the status of instance is " + status);
-		}
-
-		try {
-			return action(userInfo, EDGE_STOP, STOPPING);
-		} catch (DlabException e) {
-			log.error("Could not stop EDGE node for user {}", userInfo.getName(), e);
-			throw new DlabException("Could not stop EDGE node: " + e.getLocalizedMessage(), e);
-		}
-	}
-
-	@Override
-	public String terminate(UserInfo userInfo) {
-		log.debug("Terminating EDGE node for user {}", userInfo.getName());
-		UserInstanceStatus status = UserInstanceStatus.of(keyDAO.getEdgeStatus(userInfo.getName()));
-		if (status == null) {
-			log.error("Could not terminate EDGE node for user {} because the status of instance is null",
-					userInfo.getName());
-			throw new DlabException("Could not terminate EDGE node because the status of instance is null");
-		}
-
-		try {
-			return action(userInfo, EDGE_TERMINATE, TERMINATING);
-		} catch (DlabException e) {
-			log.error("Could not terminate EDGE node for user {}", userInfo.getName(), e);
-			throw new DlabException("Could not terminate EDGE node: " + e.getLocalizedMessage(), e);
-		}
-	}
-
-	/**
-	 * Updates parameter 'reuploadKeyRequired' for user's edge node with allowable statuses.
-	 *
-	 * @param user                user.
-	 * @param reuploadKeyRequired true/false.
-	 * @param edgeStatuses        allowable statuses of edge node.
-	 */
-	@Override
-	public void updateReuploadKeyFlag(String user, boolean reuploadKeyRequired, UserInstanceStatus... edgeStatuses) {
-		keyDAO.updateEdgeReuploadKey(user, reuploadKeyRequired, edgeStatuses);
-	}
-
-	@Override
-	public String getEdgeInfo(UserInfo userInfo, String projectName) {
-		return null;
-	}
-
-	/**
-	 * Sends the post request to the provisioning service and update the status of EDGE node.
-	 *
-	 * @param userInfo user info.
-	 * @param action   action for EDGE node.
-	 * @param status   status of EDGE node.
-	 * @return Request Id.
-	 */
-	private String action(UserInfo userInfo, String action, UserInstanceStatus status) {
-		try {
-			keyDAO.updateEdgeStatus(userInfo.getName(), status.toString());
-			ResourceSysBaseDTO<?> dto = requestBuilder.newEdgeAction(userInfo);
-			String uuid = provisioningService.post(action, userInfo.getAccessToken(), dto, String.class);
-			requestId.put(userInfo.getName(), uuid);
-			return uuid;
-		} catch (Exception t) {
-			keyDAO.updateEdgeStatus(userInfo.getName(), FAILED.toString());
-			throw new DlabException("Could not " + action + " EDGE node " + ": " + t.getLocalizedMessage(), t);
-		}
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EndpointServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EndpointServiceImpl.java
index e1fe3ea..57c6549 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EndpointServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EndpointServiceImpl.java
@@ -19,21 +19,55 @@
 
 package com.epam.dlab.backendapi.service.impl;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.EndpointDAO;
+import com.epam.dlab.backendapi.dao.ExploratoryDAO;
+import com.epam.dlab.backendapi.dao.UserRoleDao;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.EndpointResourcesDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.cloud.CloudProvider;
+import com.epam.dlab.constants.ServiceConsts;
+import com.epam.dlab.dto.UserInstanceDTO;
+import com.epam.dlab.dto.UserInstanceStatus;
+import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceConflictException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
+import com.epam.dlab.rest.client.RESTService;
 import com.google.inject.Inject;
+import com.google.inject.name.Named;
+import lombok.extern.slf4j.Slf4j;
 
+import javax.ws.rs.core.Response;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
 
+
+@Slf4j
 public class EndpointServiceImpl implements EndpointService {
+	private static final String HEALTH_CHECK = "healthcheck";
 	private final EndpointDAO endpointDAO;
+	private final ProjectService projectService;
+	private final ExploratoryDAO exploratoryDAO;
+	private final RESTService provisioningService;
+	private final UserRoleDao userRoleDao;
 
 	@Inject
-	public EndpointServiceImpl(EndpointDAO endpointDAO) {
+	public EndpointServiceImpl(EndpointDAO endpointDAO, ProjectService projectService, ExploratoryDAO exploratoryDAO,
+							   @Named(ServiceConsts.PROVISIONING_SERVICE_NAME) RESTService provisioningService,
+							   UserRoleDao userRoleDao) {
+
 		this.endpointDAO = endpointDAO;
+		this.projectService = projectService;
+		this.exploratoryDAO = exploratoryDAO;
+		this.provisioningService = provisioningService;
+		this.userRoleDao = userRoleDao;
 	}
 
 	@Override
@@ -42,22 +76,106 @@
 	}
 
 	@Override
+	public List<EndpointDTO> getEndpointsWithStatus(EndpointDTO.EndpointStatus status) {
+		return endpointDAO.getEndpointsWithStatus(status.name());
+	}
+
+	@Override
+	public EndpointResourcesDTO getEndpointResources(String endpoint) {
+		List<UserInstanceDTO> exploratories = exploratoryDAO.fetchExploratoriesByEndpointWhereStatusNotIn(endpoint,
+				Arrays.asList(UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED));
+
+		List<ProjectDTO> projects = projectService.getProjectsByEndpoint(endpoint);
+
+		return new EndpointResourcesDTO(exploratories, projects);
+	}
+
+	@Override
 	public EndpointDTO get(String name) {
 		return endpointDAO.get(name)
 				.orElseThrow(() -> new ResourceNotFoundException("Endpoint with name " + name + " not found"));
 	}
 
+	/**
+	 * Creates a new endpoint object in the system.
+	 * Endpoint objects must contain unique values in the 'url' and 'name' fields,
+	 * i.e. two endpoints with the same URL may not be created in the system.
+	 * @param userInfo    user properties
+	 * @param endpointDTO object with endpoint fields
+	 */
 	@Override
-	public void create(EndpointDTO endpointDTO) {
-		if (!endpointDAO.get(endpointDTO.getName()).isPresent()) {
-			endpointDAO.create(endpointDTO);
-		} else {
-			throw new ResourceConflictException("Endpoint with passed name already exist in system");
+	public void create(UserInfo userInfo, EndpointDTO endpointDTO) {
+		if (endpointDAO.get(endpointDTO.getName()).isPresent()) {
+			throw new ResourceConflictException("The Endpoint with this name exists in system");
 		}
+		if (endpointDAO.getEndpointWithUrl(endpointDTO.getUrl()).isPresent()) {
+			throw new ResourceConflictException("An endpoint with this URL already exists in the system");
+		}
+		CloudProvider cloudProvider = checkUrl(userInfo, endpointDTO.getUrl());
+		if (Objects.isNull(cloudProvider)) {
+			throw new DlabException("CloudProvider cannot be null");
+		}
+		endpointDAO.create(new EndpointDTO(endpointDTO.getName(), endpointDTO.getUrl(), endpointDTO.getAccount(),
+				endpointDTO.getTag(), EndpointDTO.EndpointStatus.ACTIVE, cloudProvider));
+		userRoleDao.updateMissingRoles(cloudProvider);
 	}
 
 	@Override
-	public void remove(String name) {
+	public void updateEndpointStatus(String name, EndpointDTO.EndpointStatus status) {
+		endpointDAO.updateEndpointStatus(name, status.name());
+	}
+
+	@Override
+	public void remove(UserInfo userInfo, String name, boolean withResources) {
+		EndpointDTO endpointDTO = endpointDAO.get(name)
+				.orElseThrow(() -> new ResourceNotFoundException(String.format("Endpoint %s does not exist", name)));
+		List<ProjectDTO> projects = projectService.getProjectsByEndpoint(name);
+		checkProjectEndpointResourcesStatuses(projects, name);
+
+		if (withResources) {
+			removeEndpointInAllProjects(userInfo, name, projects);
+		}
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
 		endpointDAO.remove(name);
+		List<CloudProvider> remainingProviders = endpointDAO.getEndpoints().stream()
+				.map(EndpointDTO::getCloudProvider)
+				.collect(Collectors.toList());
+		userRoleDao.removeUnnecessaryRoles(cloudProvider, remainingProviders);
+	}
+
+	@Override
+	public void removeEndpointInAllProjects(UserInfo userInfo, String endpointName, List<ProjectDTO> projects) {
+		projects.forEach(project -> projectService.terminateEndpoint(userInfo, endpointName, project.getName()));
+	}
+
+	@Override
+	public CloudProvider checkUrl(UserInfo userInfo, String url) {
+		Response response;
+		CloudProvider cloudProvider;
+		try {
+			response = provisioningService.get(url + HEALTH_CHECK, userInfo.getAccessToken(), Response.class);
+			cloudProvider = response.readEntity(CloudProvider.class);
+		} catch (Exception e) {
+			log.error("Cannot connect to url '{}'. {}", url, e.getMessage());
+			throw new DlabException(String.format("Cannot connect to url '%s'. %s", url, e.getMessage()));
+		}
+		if (response.getStatus() != 200) {
+			log.warn("Endpoint url {} is not valid", url);
+			throw new ResourceNotFoundException(String.format("Endpoint url '%s' is not valid", url));
+		}
+		return cloudProvider;
+	}
+
+	private void checkProjectEndpointResourcesStatuses(List<ProjectDTO> projects, String endpoint) {
+		boolean hasProcessingResources = projects.stream().anyMatch(p ->
+				!projectService.checkExploratoriesAndComputationalProgress(p.getName(), Collections.singletonList(endpoint)) ||
+						p.getEndpoints().stream().anyMatch(e -> e.getName().equals(endpoint) &&
+								Arrays.asList(UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.STOPPING,
+										UserInstanceStatus.TERMINATING).contains(e.getStatus())));
+
+		if (hasProcessingResources) {
+			throw new ResourceConflictException("Cannot terminate endpoint resources because one of the project " +
+					"resources is still being processed");
+		}
 	}
 }
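
The rewritten create() above enforces three preconditions in order: the endpoint name must be unused, the URL must be unused, and the URL must answer the healthcheck with a recognizable cloud provider before anything is persisted. A minimal, self-contained sketch of that ordering, not part of the patch (the Provider enum and HealthCheck interface below are illustrative stand-ins for DLab's CloudProvider and the provisioning call):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

class EndpointRegistrySketch {
    enum Provider { AWS, AZURE, GCP }            // stand-in for CloudProvider

    interface HealthCheck {                      // stand-in for the /healthcheck probe
        Optional<Provider> probe(String url);
    }

    private final Map<String, String> byName = new HashMap<>(); // name -> url
    private final Map<String, String> byUrl = new HashMap<>();  // url -> name

    void create(String name, String url, HealthCheck healthCheck) {
        if (byName.containsKey(name)) {
            throw new IllegalStateException("An endpoint with this name already exists");
        }
        if (byUrl.containsKey(url)) {
            throw new IllegalStateException("An endpoint with this URL already exists");
        }
        // Only a reachable endpoint that reports its cloud provider may be registered.
        Provider provider = healthCheck.probe(url)
                .orElseThrow(() -> new IllegalStateException("Cloud provider cannot be null"));
        byName.put(name, url);
        byUrl.put(url, name);
        System.out.printf("registered %s (%s) on %s%n", name, url, provider);
    }

    public static void main(String[] args) {
        EndpointRegistrySketch registry = new EndpointRegistrySketch();
        registry.create("local", "https://endpoint.example/", u -> Optional.of(Provider.AWS));
    }
}

Running the cheap uniqueness checks before the remote probe means an unreachable URL is only contacted when the request could otherwise succeed.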
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
index 09428c4..8b2806b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
@@ -20,14 +20,20 @@
 package com.epam.dlab.backendapi.service.impl;
 
 import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.annotation.Project;
+import com.epam.dlab.backendapi.annotation.ProjectAdmin;
+import com.epam.dlab.backendapi.annotation.User;
 import com.epam.dlab.backendapi.dao.EnvDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
 import com.epam.dlab.backendapi.dao.UserSettingsDAO;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.UserDTO;
 import com.epam.dlab.backendapi.resources.dto.UserResourceInfo;
-import com.epam.dlab.backendapi.service.*;
+import com.epam.dlab.backendapi.service.ComputationalService;
+import com.epam.dlab.backendapi.service.EnvironmentService;
+import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.backendapi.service.SecurityService;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.exceptions.ResourceConflictException;
@@ -49,27 +55,29 @@
 @Singleton
 @Slf4j
 public class EnvironmentServiceImpl implements EnvironmentService {
-
 	private static final String ERROR_MSG_FORMAT = "Cannot %s environment because one of the user's resources is in status " +
 			"CREATING or STARTING";
+
+	private final EnvDAO envDAO;
+	private final UserSettingsDAO settingsDAO;
+	private final ExploratoryDAO exploratoryDAO;
+	private final ExploratoryService exploratoryService;
+	private final ComputationalService computationalService;
+	private final SecurityService securityService;
+	private final ProjectService projectService;
+
 	@Inject
-	private EnvDAO envDAO;
-	@Inject
-	private ExploratoryDAO exploratoryDAO;
-	@Inject
-	private ExploratoryService exploratoryService;
-	@Inject
-	private ComputationalService computationalService;
-	@Inject
-	private SecurityService securityService;
-	@Inject
-	private KeyDAO keyDAO;
-	@Inject
-	private EdgeService edgeService;
-	@Inject
-	private ProjectService projectService;
-	@Inject
-	private UserSettingsDAO settingsDAO;
+	public EnvironmentServiceImpl(EnvDAO envDAO, UserSettingsDAO settingsDAO, ExploratoryDAO exploratoryDAO,
+								  ExploratoryService exploratoryService, ComputationalService computationalService,
+								  SecurityService securityService, ProjectService projectService) {
+		this.envDAO = envDAO;
+		this.settingsDAO = settingsDAO;
+		this.exploratoryDAO = exploratoryDAO;
+		this.exploratoryService = exploratoryService;
+		this.computationalService = computationalService;
+		this.securityService = securityService;
+		this.projectService = projectService;
+	}
 
 	@Override
 	public List<UserDTO> getUsers() {
@@ -88,18 +96,13 @@
 	}
 
 	@Override
-	public Set<String> getUserNames() {
-		log.debug("Getting all users...");
-		return envDAO.fetchAllUsers();
-	}
-
-	@Override
-	public List<UserResourceInfo> getAllEnv() {
+	public List<UserResourceInfo> getAllEnv(UserInfo user) {
 		log.debug("Getting all user's environment...");
 		List<UserInstanceDTO> expList = exploratoryDAO.getInstances();
-		return projectService.getProjects()
+		return projectService.getProjects(user)
 				.stream()
-				.map(projectDTO -> getProjectEnv(projectDTO, expList)).flatMap(Collection::stream)
+				.map(projectDTO -> getProjectEnv(projectDTO, expList))
+				.flatMap(Collection::stream)
 				.collect(toList());
 	}
 
@@ -113,21 +116,11 @@
 	}
 
 	@Override
-	public void stopEnvironment(String user) {
-		log.debug("Stopping environment for user {}", user);
-		checkState(user, "stop");
-		exploratoryDAO.fetchRunningExploratoryFields(user)
-				.forEach(this::stopNotebook);
-		stopEdge(user);
-	}
-
-	@Override
 	public void stopEnvironmentWithServiceAccount(String user) {
 		log.debug("Stopping environment for user {} by scheduler", user);
 		checkState(user, "stop");
 		exploratoryDAO.fetchRunningExploratoryFields(user)
 				.forEach(this::stopNotebookWithServiceAccount);
-		stopEdge(user);
 	}
 
 	@Override
@@ -143,48 +136,32 @@
 						endpoint.getName(), project));
 	}
 
+	@ProjectAdmin
 	@Override
-	public void stopEdge(String user) {
-		if (UserInstanceStatus.RUNNING.toString().equals(keyDAO.getEdgeStatus(user))) {
-			edgeService.stop(securityService.getUserInfoOffline(user));
-		}
+	public void stopExploratory(@User UserInfo userInfo, String user, @Project String project, String exploratoryName) {
+		exploratoryService.stop(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void stopExploratory(String user, String exploratoryName) {
-		stopNotebook(new UserInstanceDTO().withUser(user).withExploratoryName(exploratoryName));
+	public void stopComputational(@User UserInfo userInfo, String user, @Project String project, String exploratoryName,
+								  String computationalName) {
+		computationalService.stopSparkCluster(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName,
+				computationalName);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void stopComputational(String user, String exploratoryName, String computationalName) {
-		stopDataengine(user, exploratoryName, computationalName);
+	public void terminateExploratory(@User UserInfo userInfo, String user, @Project String project, String exploratoryName) {
+		exploratoryService.terminate(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void terminateAll() {
-		log.debug("Terminating environment for all users...");
-		getUserNames().forEach(this::terminateEnvironment);
-	}
-
-	@Override
-	public void terminateEnvironment(String user) {
-		log.debug("Terminating environment for user {}", user);
-		checkState(user, "terminate");
-		if (!terminateEdge(user)) {
-			exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(user, UserInstanceStatus.TERMINATED,
-					UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATING)
-					.forEach(this::terminateNotebook);
-		}
-	}
-
-	@Override
-	public void terminateExploratory(String user, String exploratoryName) {
-		terminateNotebook(new UserInstanceDTO().withUser(user).withExploratoryName(exploratoryName));
-	}
-
-	@Override
-	public void terminateComputational(String user, String exploratoryName, String computationalName) {
-		terminateCluster(user, exploratoryName, computationalName);
+	public void terminateComputational(@User UserInfo userInfo, String user, @Project String project,
+									   String exploratoryName, String computationalName) {
+		computationalService.terminateComputational(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName,
+				computationalName);
 	}
 
 	private UserDTO toUserDTO(String u, UserDTO.Status status) {
@@ -198,49 +175,22 @@
 								UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
 						UserInstanceStatus.CREATING,
 						UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		if (UserInstanceStatus.STARTING.toString().equals(keyDAO.getEdgeStatus(user)) || !userInstances.isEmpty()) {
+		if (!userInstances.isEmpty()) {
 			log.error(String.format(ERROR_MSG_FORMAT, action));
 			throw new ResourceConflictException(String.format(ERROR_MSG_FORMAT, action));
 		}
 	}
 
-	private void stopNotebook(UserInstanceDTO instance) {
-		final UserInfo userInfo = securityService.getUserInfoOffline(instance.getUser());
-		exploratoryService.stop(userInfo, instance.getExploratoryName());
-	}
-
 	private void stopNotebookWithServiceAccount(UserInstanceDTO instance) {
 		final UserInfo userInfo = securityService.getServiceAccountInfo(instance.getUser());
-		exploratoryService.stop(userInfo, instance.getExploratoryName());
-	}
-
-	private void stopDataengine(String user, String exploratoryName, String computationalName) {
-		final UserInfo userInfo = securityService.getUserInfoOffline(user);
-		computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
-	}
-
-	private boolean terminateEdge(String user) {
-		final boolean nodeExists = keyDAO.edgeNodeExist(user);
-		if (nodeExists) {
-			edgeService.terminate(securityService.getUserInfoOffline(user));
-			exploratoryService.updateExploratoryStatuses(user, UserInstanceStatus.TERMINATING);
-		}
-		return nodeExists;
-	}
-
-	private void terminateNotebook(UserInstanceDTO instance) {
-		final UserInfo userInfo = securityService.getUserInfoOffline(instance.getUser());
-		exploratoryService.terminate(userInfo, instance.getExploratoryName());
-	}
-
-	private void terminateCluster(String user, String exploratoryName, String computationalName) {
-		final UserInfo userInfo = securityService.getUserInfoOffline(user);
-		computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+		exploratoryService.stop(userInfo, instance.getProject(), instance.getExploratoryName());
 	}
 
 	private List<UserResourceInfo> getProjectEnv(ProjectDTO projectDTO, List<UserInstanceDTO> allInstances) {
-		final Stream<UserResourceInfo> userResources = allInstances.stream()
-				.filter(instance -> instance.getProject().equals(projectDTO.getName())).map(this::toUserResourceInfo);
+		final Stream<UserResourceInfo> userResources = allInstances
+				.stream()
+				.filter(instance -> instance.getProject().equals(projectDTO.getName()))
+				.map(this::toUserResourceInfo);
 		if (projectDTO.getEndpoints() != null) {
 			final Stream<UserResourceInfo> edges = projectDTO.getEndpoints()
 					.stream()
@@ -248,8 +198,7 @@
 							.withResourceStatus(e.getStatus().toString())
 							.withProject(projectDTO.getName())
 							.withIp(e.getEdgeInfo() != null ? e.getEdgeInfo().getPublicIp() : null));
-			return Stream.concat(edges, userResources)
-					.collect(toList());
+			return Stream.concat(edges, userResources).collect(toList());
 		} else {
 			return userResources.collect(toList());
 		}
@@ -262,7 +211,8 @@
 				.withResourceStatus(userInstance.getStatus())
 				.withCompResources(userInstance.getResources())
 				.withUser(userInstance.getUser())
-				.withProject(userInstance.getProject());
+				.withProject(userInstance.getProject())
+				.withCloudProvider(userInstance.getCloudProvider());
 	}
 
 	private void checkProjectResourceConditions(String project, String action) {
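
The @ProjectAdmin methods above act on another user's resources by pairing the target user's name with the administrator's access token, as in new UserInfo(user, userInfo.getAccessToken()). A simplified model of that impersonation pattern, not part of the patch (Principal is an illustrative stand-in for UserInfo):

final class ActorSketch {
    static final class Principal {               // stand-in for UserInfo
        final String name;
        final String accessToken;
        Principal(String name, String accessToken) {
            this.name = name;
            this.accessToken = accessToken;
        }
    }

    // The admin keeps authenticating with their own token while the action
    // is attributed to the target user's environment.
    static Principal onBehalfOf(Principal admin, String targetUser) {
        return new Principal(targetUser, admin.accessToken);
    }

    public static void main(String[] args) {
        Principal admin = new Principal("admin", "token-123");
        Principal effective = onBehalfOf(admin, "alice");
        System.out.println(effective.name + " / " + effective.accessToken); // alice / token-123
    }
}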
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
index 9c5594b..9f6be91 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
@@ -26,20 +26,29 @@
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.GitCredsDAO;
 import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
+import com.epam.dlab.backendapi.resources.dto.ExploratoryCreatePopUp;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.TagService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.dto.StatusEnvBaseDTO;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.dto.computational.UserComputationalResource;
-import com.epam.dlab.dto.exploratory.*;
+import com.epam.dlab.dto.exploratory.ExploratoryActionDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryGitCredsDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryReconfigureSparkClusterActionDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryStatusDTO;
+import com.epam.dlab.dto.exploratory.LibInstallDTO;
+import com.epam.dlab.dto.exploratory.LibStatus;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.model.ResourceType;
 import com.epam.dlab.model.exploratory.Exploratory;
 import com.epam.dlab.model.library.Library;
 import com.epam.dlab.rest.client.RESTService;
@@ -50,17 +59,31 @@
 import org.apache.commons.lang3.StringUtils;
 
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
-import static com.epam.dlab.rest.contracts.ExploratoryAPI.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
+import static com.epam.dlab.rest.contracts.ExploratoryAPI.EXPLORATORY_CREATE;
+import static com.epam.dlab.rest.contracts.ExploratoryAPI.EXPLORATORY_RECONFIGURE_SPARK;
+import static com.epam.dlab.rest.contracts.ExploratoryAPI.EXPLORATORY_START;
+import static com.epam.dlab.rest.contracts.ExploratoryAPI.EXPLORATORY_STOP;
+import static com.epam.dlab.rest.contracts.ExploratoryAPI.EXPLORATORY_TERMINATE;
 
 @Slf4j
 @Singleton
 public class ExploratoryServiceImpl implements ExploratoryService {
 
 	@Inject
+	private ProjectService projectService;
+	@Inject
 	private ExploratoryDAO exploratoryDAO;
 	@Inject
 	private ComputationalDAO computationalDAO;
@@ -83,17 +106,17 @@
 	@BudgetLimited
 	@Override
 	public String start(UserInfo userInfo, String exploratoryName, @Project String project) {
-		return action(userInfo, exploratoryName, EXPLORATORY_START, STARTING);
+		return action(userInfo, project, exploratoryName, EXPLORATORY_START, STARTING);
 	}
 
 	@Override
-	public String stop(UserInfo userInfo, String exploratoryName) {
-		return action(userInfo, exploratoryName, EXPLORATORY_STOP, STOPPING);
+	public String stop(UserInfo userInfo, String project, String exploratoryName) {
+		return action(userInfo, project, exploratoryName, EXPLORATORY_STOP, STOPPING);
 	}
 
 	@Override
-	public String terminate(UserInfo userInfo, String exploratoryName) {
-		return action(userInfo, exploratoryName, EXPLORATORY_TERMINATE, TERMINATING);
+	public String terminate(UserInfo userInfo, String project, String exploratoryName) {
+		return action(userInfo, project, exploratoryName, EXPLORATORY_TERMINATE, TERMINATING);
 	}
 
 	@BudgetLimited
@@ -101,16 +124,18 @@
 	public String create(UserInfo userInfo, Exploratory exploratory, @Project String project) {
 		boolean isAdded = false;
 		try {
-			final UserInstanceDTO userInstanceDTO = getUserInstanceDTO(userInfo, exploratory, project);
+			final ProjectDTO projectDTO = projectService.get(project);
+			final EndpointDTO endpointDTO = endpointService.get(exploratory.getEndpoint());
+			final UserInstanceDTO userInstanceDTO = getUserInstanceDTO(userInfo, exploratory, project, endpointDTO.getCloudProvider());
 			exploratoryDAO.insertExploratory(userInstanceDTO);
 			isAdded = true;
 			final ExploratoryGitCredsDTO gitCreds = gitCredsDAO.findGitCreds(userInfo.getName());
 			log.debug("Created exploratory environment {} for user {}", exploratory.getName(), userInfo.getName());
 			final String uuid =
-					provisioningService.post(endpointService.get(userInstanceDTO.getEndpoint()).getUrl() + EXPLORATORY_CREATE,
+					provisioningService.post(endpointDTO.getUrl() + EXPLORATORY_CREATE,
 							userInfo.getAccessToken(),
-							requestBuilder.newExploratoryCreate(exploratory, userInfo, gitCreds,
-									userInstanceDTO.getTags()),
+							requestBuilder.newExploratoryCreate(projectDTO, endpointDTO, exploratory, userInfo,
+									gitCreds, userInstanceDTO.getTags()),
 							String.class);
 			requestId.put(userInfo.getName(), uuid);
 			return uuid;
@@ -118,69 +143,34 @@
 			log.error("Could not update the status of exploratory environment {} with name {} for user {}",
 					exploratory.getDockerImage(), exploratory.getName(), userInfo.getName(), t);
 			if (isAdded) {
-				updateExploratoryStatusSilent(userInfo.getName(), exploratory.getName(), FAILED);
+				updateExploratoryStatusSilent(userInfo.getName(), project, exploratory.getName(), FAILED);
 			}
 			throw new DlabException("Could not create exploratory environment " + exploratory.getName() + " for user "
-					+ userInfo.getName() + ": " + t.getLocalizedMessage(), t);
+					+ userInfo.getName() + ": " + Optional.ofNullable(t.getCause()).map(Throwable::getMessage).orElse(t.getMessage()), t);
 		}
 	}
 
 	@Override
-	public void updateExploratoryStatuses(String user, UserInstanceStatus status) {
-		exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(user, TERMINATED, FAILED)
-				.forEach(ui -> updateExploratoryStatus(ui.getExploratoryName(), status, user));
-	}
-
-	@Override
 	public void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status) {
 		exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(project, endpoint, TERMINATED, FAILED)
-				.forEach(ui -> updateExploratoryStatus(ui.getExploratoryName(), status, ui.getUser()));
-	}
-
-	/**
-	 * Updates parameter 'reuploadKeyRequired' for corresponding user's exploratories with allowable statuses.
-	 *
-	 * @param user                user.
-	 * @param reuploadKeyRequired true/false.
-	 * @param exploratoryStatuses allowable exploratories' statuses.
-	 */
-	@Override
-	public void updateExploratoriesReuploadKeyFlag(String user, boolean reuploadKeyRequired,
-												   UserInstanceStatus... exploratoryStatuses) {
-		exploratoryDAO.updateReuploadKeyForExploratories(user, reuploadKeyRequired, exploratoryStatuses);
-	}
-
-	/**
-	 * Returns list of user's exploratories and corresponding computational resources where both of them have
-	 * predefined statuses.
-	 *
-	 * @param user                user.
-	 * @param exploratoryStatus   status for exploratory environment.
-	 * @param computationalStatus status for computational resource affiliated with the exploratory.
-	 * @return list with user instances.
-	 */
-	@Override
-	public List<UserInstanceDTO> getInstancesWithStatuses(String user, UserInstanceStatus exploratoryStatus,
-														  UserInstanceStatus computationalStatus) {
-		return getExploratoriesWithStatus(user, exploratoryStatus).stream()
-				.map(e -> e.withResources(computationalResourcesWithStatus(e, computationalStatus)))
-				.collect(Collectors.toList());
+				.forEach(ui -> updateExploratoryStatus(project, ui.getExploratoryName(), status, ui.getUser()));
 	}
 
 	@Override
-	public void updateClusterConfig(UserInfo userInfo, String exploratoryName, List<ClusterConfig> config) {
+	public void updateClusterConfig(UserInfo userInfo, String project, String exploratoryName, List<ClusterConfig> config) {
 		final String userName = userInfo.getName();
 		final String token = userInfo.getAccessToken();
-		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchRunningExploratoryFields(userName,
-				exploratoryName);
+		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchRunningExploratoryFields(userName, project, exploratoryName);
+		EndpointDTO endpointDTO = endpointService.get(userInstanceDTO.getEndpoint());
 		final ExploratoryReconfigureSparkClusterActionDTO updateClusterConfigDTO =
-				requestBuilder.newClusterConfigUpdate(userInfo, userInstanceDTO, config);
-		final String uuid =
-				provisioningService.post(endpointService.get(userInstanceDTO.getEndpoint()).getUrl() + EXPLORATORY_RECONFIGURE_SPARK, token, updateClusterConfigDTO,
-						String.class);
+				requestBuilder.newClusterConfigUpdate(userInfo, userInstanceDTO, config, endpointDTO);
+		final String uuid = provisioningService.post(endpointDTO.getUrl() + EXPLORATORY_RECONFIGURE_SPARK,
+				token, updateClusterConfigDTO,
+				String.class);
 		requestId.put(userName, uuid);
 		exploratoryDAO.updateExploratoryFields(new ExploratoryStatusDTO()
 				.withUser(userName)
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withConfig(config)
 				.withStatus(UserInstanceStatus.RECONFIGURING.toString()));
@@ -190,22 +180,61 @@
 	 * Returns user instance's data by its name.
 	 *
 	 * @param user            user.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 * @return corresponding user instance's data or empty data if resource doesn't exist.
 	 */
 	@Override
-	public Optional<UserInstanceDTO> getUserInstance(String user, String exploratoryName) {
+	public Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName) {
 		try {
-			return Optional.of(exploratoryDAO.fetchExploratoryFields(user, exploratoryName));
+			return Optional.of(exploratoryDAO.fetchExploratoryFields(user, project, exploratoryName));
 		} catch (DlabException e) {
-			log.warn("User instance with exploratory name {} for user {} not found.", exploratoryName, user);
+			log.warn("User instance with exploratory {}, project {} for user {} not found.", exploratoryName, project, user);
 		}
 		return Optional.empty();
 	}
 
 	@Override
-	public List<ClusterConfig> getClusterConfig(UserInfo user, String exploratoryName) {
-		return exploratoryDAO.getClusterConfig(user.getName(), exploratoryName);
+	public Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName, boolean includeCompResources) {
+		try {
+			return Optional.of(exploratoryDAO.fetchExploratoryFields(user, project, exploratoryName, includeCompResources));
+		} catch (DlabException e) {
+			log.warn("User instance with exploratory {}, project {} for user {} not found.", exploratoryName, project, user);
+		}
+		return Optional.empty();
+	}
+
+	@Override
+	public List<UserInstanceDTO> findAll() {
+		return exploratoryDAO.getInstances();
+	}
+
+	@Override
+	public List<UserInstanceDTO> findAll(Set<ProjectDTO> projects) {
+		List<String> projectNames = projects
+				.stream()
+				.map(ProjectDTO::getName)
+				.collect(Collectors.toList());
+		return exploratoryDAO.fetchExploratoryFieldsForProjectWithComp(projectNames);
+	}
+
+	@Override
+	public List<ClusterConfig> getClusterConfig(UserInfo user, String project, String exploratoryName) {
+		return exploratoryDAO.getClusterConfig(user.getName(), project, exploratoryName);
+	}
+
+	@Override
+	public ExploratoryCreatePopUp getUserInstances(UserInfo user) {
+		List<ProjectDTO> userProjects = projectService.getUserProjects(user, false);
+		Map<String, List<String>> collect = userProjects.stream()
+				.collect(Collectors.toMap(ProjectDTO::getName, this::getProjectExploratoryNames));
+		return new ExploratoryCreatePopUp(userProjects, collect);
+	}
+
+	private List<String> getProjectExploratoryNames(ProjectDTO project) {
+		return exploratoryDAO.fetchExploratoryFieldsForProject(project.getName()).stream()
+				.map(UserInstanceDTO::getExploratoryName)
+				.collect(Collectors.toList());
 	}
 
 
@@ -217,63 +246,57 @@
 	}
 
 	/**
-	 * Returns list of user's exploratories with predefined status.
-	 *
-	 * @param user   user.
-	 * @param status status for exploratory environment.
-	 * @return list of user's instances.
-	 */
-	private List<UserInstanceDTO> getExploratoriesWithStatus(String user, UserInstanceStatus status) {
-		return exploratoryDAO.fetchUserExploratoriesWhereStatusIn(user, true, status);
-	}
-
-	/**
 	 * Sends a POST request to the provisioning service and updates the status of the exploratory environment.
 	 *
 	 * @param userInfo        user info.
+	 * @param project         name of project
 	 * @param exploratoryName name of exploratory environment.
 	 * @param action          action for exploratory environment.
 	 * @param status          status for exploratory environment.
 	 * @return Invocation request as JSON string.
 	 */
-	private String action(UserInfo userInfo, String exploratoryName, String action, UserInstanceStatus status) {
+	private String action(UserInfo userInfo, String project, String exploratoryName, String action, UserInstanceStatus status) {
 		try {
-			updateExploratoryStatus(exploratoryName, status, userInfo.getName());
+			updateExploratoryStatus(project, exploratoryName, status, userInfo.getName());
 
-			UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), exploratoryName);
+			UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, exploratoryName);
+			EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 			final String uuid =
-					provisioningService.post(endpointService.get(userInstance.getEndpoint()).getUrl() + action,
-							userInfo.getAccessToken(),
-							getExploratoryActionDto(userInfo, status, userInstance), String.class);
+					provisioningService.post(endpointDTO.getUrl() + action, userInfo.getAccessToken(),
+							getExploratoryActionDto(userInfo, status, userInstance, endpointDTO), String.class);
 			requestId.put(userInfo.getName(), uuid);
 			return uuid;
 		} catch (Exception t) {
-			log.error("Could not " + action + " exploratory environment {} for user {}", exploratoryName, userInfo
-					.getName(), t);
-			updateExploratoryStatusSilent(userInfo.getName(), exploratoryName, FAILED);
-			throw new DlabException("Could not " + action + " exploratory environment " + exploratoryName + ": " +
-					t.getLocalizedMessage(), t);
+			log.error("Could not {} exploratory environment {} for user {}",
+					StringUtils.substringAfter(action, "/"), exploratoryName, userInfo.getName(), t);
+			updateExploratoryStatusSilent(userInfo.getName(), project, exploratoryName, FAILED);
+			final String errorMsg = String.format("Could not %s exploratory environment %s: %s",
+					StringUtils.substringAfter(action, "/"), exploratoryName,
+					Optional.ofNullable(t.getCause()).map(Throwable::getMessage).orElse(t.getMessage()));
+			throw new DlabException(errorMsg, t);
 		}
 	}
 
-	private void updateExploratoryStatus(String exploratoryName, UserInstanceStatus status, String user) {
-		updateExploratoryStatus(user, exploratoryName, status);
+	private void updateExploratoryStatus(String project, String exploratoryName, UserInstanceStatus status, String user) {
+		updateExploratoryStatus(user, project, exploratoryName, status);
 
 		if (status == STOPPING) {
-			updateComputationalStatuses(user, exploratoryName, STOPPING, TERMINATING, FAILED, TERMINATED, STOPPED);
+			updateComputationalStatuses(user, project, exploratoryName, STOPPING, TERMINATING, FAILED, TERMINATED, STOPPED);
 		} else if (status == TERMINATING) {
-			updateComputationalStatuses(user, exploratoryName, TERMINATING, TERMINATING, TERMINATED, FAILED);
+			updateComputationalStatuses(user, project, exploratoryName, TERMINATING, TERMINATING, TERMINATED, FAILED);
+		} else if (status == TERMINATED) {
+			updateComputationalStatuses(user, project, exploratoryName, TERMINATED, TERMINATED, TERMINATED, FAILED);
 		}
 	}
 
 	private ExploratoryActionDTO<?> getExploratoryActionDto(UserInfo userInfo, UserInstanceStatus status,
-															UserInstanceDTO userInstance) {
+															UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
 		ExploratoryActionDTO<?> dto;
 		if (status != UserInstanceStatus.STARTING) {
-			dto = requestBuilder.newExploratoryStop(userInfo, userInstance);
+			dto = requestBuilder.newExploratoryStop(userInfo, userInstance, endpointDTO);
 		} else {
 			dto = requestBuilder.newExploratoryStart(
-					userInfo, userInstance, gitCredsDAO.findGitCreds(userInfo.getName()));
+					userInfo, userInstance, endpointDTO, gitCredsDAO.findGitCreds(userInfo.getName()));
 
 		}
 		return dto;
@@ -284,11 +307,12 @@
 	 * Updates the status of exploratory environment.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private void updateExploratoryStatus(String user, String exploratoryName, UserInstanceStatus status) {
-		StatusEnvBaseDTO<?> exploratoryStatus = createStatusDTO(user, exploratoryName, status);
+	private void updateExploratoryStatus(String user, String project, String exploratoryName, UserInstanceStatus status) {
+		StatusEnvBaseDTO<?> exploratoryStatus = createStatusDTO(user, project, exploratoryName, status);
 		exploratoryDAO.updateExploratoryStatus(exploratoryStatus);
 	}
 
@@ -296,41 +320,44 @@
 	 * Updates the status of the exploratory environment without throwing; any exception is logged.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private void updateExploratoryStatusSilent(String user, String exploratoryName, UserInstanceStatus status) {
+	private void updateExploratoryStatusSilent(String user, String project, String exploratoryName, UserInstanceStatus status) {
 		try {
-			updateExploratoryStatus(user, exploratoryName, status);
+			updateExploratoryStatus(user, project, exploratoryName, status);
 		} catch (DlabException e) {
 			log.error("Could not update the status of exploratory environment {} for user {} to {}",
 					exploratoryName, user, status, e);
 		}
 	}
 
-	private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus
+	private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus
 			dataEngineStatus, UserInstanceStatus dataEngineServiceStatus, UserInstanceStatus... excludedStatuses) {
 		log.debug("updating status for all computational resources of {} for user {}: DataEngine {}, " +
 				"dataengine-service {}", exploratoryName, user, dataEngineStatus, dataEngineServiceStatus);
-		computationalDAO.updateComputationalStatusesForExploratory(user, exploratoryName, dataEngineStatus,
-				dataEngineServiceStatus, excludedStatuses);
+		computationalDAO.updateComputationalStatusesForExploratory(user, project, exploratoryName,
+				dataEngineStatus, dataEngineServiceStatus, excludedStatuses);
 	}
 
 	/**
 	 * Instantiates and returns the descriptor of exploratory environment status.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private StatusEnvBaseDTO<?> createStatusDTO(String user, String exploratoryName, UserInstanceStatus status) {
+	private StatusEnvBaseDTO<?> createStatusDTO(String user, String project, String exploratoryName, UserInstanceStatus status) {
 		return new ExploratoryStatusDTO()
 				.withUser(user)
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withStatus(status);
 	}
 
-	private UserInstanceDTO getUserInstanceDTO(UserInfo userInfo, Exploratory exploratory, String project) {
+	private UserInstanceDTO getUserInstanceDTO(UserInfo userInfo, Exploratory exploratory, String project, CloudProvider cloudProvider) {
 		final UserInstanceDTO userInstance = new UserInstanceDTO()
 				.withUser(userInfo.getName())
 				.withExploratoryName(exploratory.getName())
@@ -342,19 +369,21 @@
 				.withShape(exploratory.getShape())
 				.withProject(project)
 				.withEndpoint(exploratory.getEndpoint())
+				.withCloudProvider(cloudProvider.toString())
 				.withTags(tagService.getResourceTags(userInfo, exploratory.getEndpoint(), project,
 						exploratory.getExploratoryTag()));
 		if (StringUtils.isNotBlank(exploratory.getImageName())) {
-			final List<LibInstallDTO> libInstallDtoList = getImageRelatedLibraries(userInfo, exploratory
-					.getImageName());
+			final List<LibInstallDTO> libInstallDtoList = getImageRelatedLibraries(userInfo, exploratory.getImageName(),
+					project, exploratory.getEndpoint());
 			userInstance.withLibs(libInstallDtoList);
 		}
 		return userInstance;
 	}
 
-	private List<LibInstallDTO> getImageRelatedLibraries(UserInfo userInfo, String imageFullName) {
-		final List<Library> libraries = imageExploratoryDao.getLibraries(userInfo.getName(), imageFullName,
-				ResourceType.EXPLORATORY, LibStatus.INSTALLED);
+	private List<LibInstallDTO> getImageRelatedLibraries(UserInfo userInfo, String imageFullName, String project,
+														 String endpoint) {
+		final List<Library> libraries = imageExploratoryDao.getLibraries(userInfo.getName(), imageFullName, project,
+				endpoint, LibStatus.INSTALLED);
 		return toLibInstallDtoList(libraries);
 	}
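
The private action() helper above keeps one skeleton for start/stop/terminate: optimistically persist the target status, resolve the endpoint, POST to endpointUrl + action, record the request id, and on any failure mark the exploratory FAILED while surfacing the root-cause message. A compact model of that control flow, not part of the patch (all types are simplified stand-ins, not DLab classes):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;

final class ActionSketch {
    enum Status { STARTING, STOPPING, TERMINATING, FAILED }

    static String action(Map<String, Status> store, String exploratory,
                         Status target, Supplier<String> remoteCall) {
        try {
            store.put(exploratory, target);           // optimistic status update
            return remoteCall.get();                  // POST to endpointUrl + action
        } catch (Exception e) {
            store.put(exploratory, Status.FAILED);    // best-effort rollback to FAILED
            String cause = Optional.ofNullable(e.getCause())
                    .map(Throwable::getMessage)
                    .orElse(e.getMessage());          // prefer the root cause, as in the patch
            throw new RuntimeException("Could not act on " + exploratory + ": " + cause, e);
        }
    }

    public static void main(String[] args) {
        Map<String, Status> store = new HashMap<>();
        String uuid = action(store, "my-notebook", Status.STOPPING, () -> "uuid-42");
        System.out.println(uuid + " -> " + store.get("my-notebook")); // uuid-42 -> STOPPING
    }
}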
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImpl.java
index 4931276..6d94a0b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImpl.java
@@ -22,6 +22,7 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.GitCredsDAO;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.GitCredentialService;
@@ -98,9 +99,9 @@
 		try {
 			log.debug("Updating GIT creds for user {} on exploratory {}",
 					userInfo.getName(), instance.getExploratoryName());
-			ExploratoryGitCredsUpdateDTO dto = requestBuilder.newGitCredentialsUpdate(userInfo, instance, formDTO);
-			final String uuid = provisioningService
-					.post(endpointService.get(instance.getEndpoint()).getUrl() + EXPLORATORY_GIT_CREDS,
+			EndpointDTO endpointDTO = endpointService.get(instance.getEndpoint());
+			ExploratoryGitCredsUpdateDTO dto = requestBuilder.newGitCredentialsUpdate(userInfo, instance, endpointDTO, formDTO);
+			final String uuid = provisioningService.post(endpointDTO.getUrl() + EXPLORATORY_GIT_CREDS,
 							userInfo.getAccessToken(), dto, String.class);
 			requestId.put(userInfo.getName(), uuid);
 		} catch (Exception t) {
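
The change above is small but deliberate: the endpoint is resolved once into an EndpointDTO that feeds both the request builder and the POST target, replacing the inline endpointService.get(...) call. A tiny sketch of the hoisted lookup, not part of the patch (the Endpoint class and API path are illustrative):

import java.util.HashMap;
import java.util.Map;

final class LookupHoistSketch {
    static final class Endpoint {                 // stand-in for EndpointDTO
        final String url;
        Endpoint(String url) { this.url = url; }
    }

    public static void main(String[] args) {
        Map<String, Endpoint> endpoints = new HashMap<>();
        endpoints.put("local", new Endpoint("https://endpoint.example"));

        // One lookup serves both the request body and the POST target,
        // mirroring the single endpointService.get(...) call in the patch.
        Endpoint endpoint = endpoints.get("local");
        String requestBody = "git-creds update via " + endpoint.url;  // feeds the request builder
        String target = endpoint.url + "/git_creds";                  // illustrative path
        System.out.println(requestBody + " -> " + target);
    }
}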
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
index 905b504..5cb3a64 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
@@ -23,9 +23,12 @@
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryLibDAO;
 import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.ImageExploratoryService;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
 import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.dto.UserInstanceDTO;
@@ -70,17 +73,19 @@
 	private RequestBuilder requestBuilder;
 	@Inject
 	private EndpointService endpointService;
+	@Inject
+	private ProjectService projectService;
 
 	@Override
-	public String createImage(UserInfo user, String exploratoryName, String imageName, String imageDescription) {
-
-		UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(user.getName(), exploratoryName);
+	public String createImage(UserInfo user, String project, String exploratoryName, String imageName, String imageDescription) {
+		ProjectDTO projectDTO = projectService.get(project);
+		UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(user.getName(), project, exploratoryName);
 
 		if (imageExploratoryDao.exist(imageName, userInstance.getProject())) {
 			log.error(String.format(IMAGE_EXISTS_MSG, imageName, userInstance.getProject()));
 			throw new ResourceAlreadyExistException(String.format(IMAGE_EXISTS_MSG, imageName, userInstance.getProject()));
 		}
-		final List<Library> libraries = libDAO.getLibraries(user.getName(), exploratoryName);
+		final List<Library> libraries = libDAO.getLibraries(user.getName(), project, exploratoryName);
 
 		imageExploratoryDao.save(Image.builder()
 				.name(imageName)
@@ -97,11 +102,14 @@
 
 		exploratoryDAO.updateExploratoryStatus(new ExploratoryStatusDTO()
 				.withUser(user.getName())
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withStatus(UserInstanceStatus.CREATING_IMAGE));
 
-		return provisioningService.post(endpointService.get(userInstance.getEndpoint()).getUrl() + ExploratoryAPI.EXPLORATORY_IMAGE, user.getAccessToken(),
-				requestBuilder.newExploratoryImageCreate(user, userInstance, imageName), String.class);
+		EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
+		return provisioningService.post(endpointDTO.getUrl() + ExploratoryAPI.EXPLORATORY_IMAGE,
+				user.getAccessToken(),
+				requestBuilder.newExploratoryImageCreate(user, userInstance, imageName, endpointDTO, projectDTO), String.class);
 	}
 
 	@Override
@@ -110,13 +118,14 @@
 				exploratoryName, image.getUser());
 		exploratoryDAO.updateExploratoryStatus(new ExploratoryStatusDTO()
 				.withUser(image.getUser())
+				.withProject(image.getProject())
 				.withExploratoryName(exploratoryName)
 				.withStatus(UserInstanceStatus.RUNNING));
 		imageExploratoryDao.updateImageFields(image);
 		if (newNotebookIp != null) {
 			log.debug("Changing exploratory ip with name {} for user {} to {}", exploratoryName, image.getUser(),
 					newNotebookIp);
-			exploratoryDAO.updateExploratoryIp(image.getUser(), newNotebookIp, exploratoryName);
+			exploratoryDAO.updateExploratoryIp(image.getUser(), image.getProject(), newNotebookIp, exploratoryName);
 		}
 
 	}
@@ -127,8 +136,8 @@
 	}
 
 	@Override
-	public ImageInfoRecord getImage(String user, String name) {
-		return imageExploratoryDao.getImage(user, name).orElseThrow(() ->
+	public ImageInfoRecord getImage(String user, String name, String project, String endpoint) {
+		return imageExploratoryDao.getImage(user, name, project, endpoint).orElseThrow(() ->
 				new ResourceNotFoundException(String.format(IMAGE_NOT_FOUND_MSG, name, user)));
 	}
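
Image operations above gain project and endpoint dimensions: getImage(user, name, project, endpoint) replaces getImage(user, name), so the same image name can exist in different projects or endpoints without colliding. A minimal model of the widened key, not part of the patch (the flat map is illustrative, not the DAO's actual storage):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

final class ImageKeySketch {
    private final Map<String, String> images = new HashMap<>();

    private static String key(String user, String name, String project, String endpoint) {
        return String.join("/", user, name, project, endpoint);
    }

    void save(String user, String name, String project, String endpoint, String dockerImage) {
        images.put(key(user, name, project, endpoint), dockerImage);
    }

    Optional<String> getImage(String user, String name, String project, String endpoint) {
        return Optional.ofNullable(images.get(key(user, name, project, endpoint)));
    }

    public static void main(String[] args) {
        ImageKeySketch dao = new ImageKeySketch();
        dao.save("alice", "my-image", "projectA", "local", "docker.dlab-jupyter");
        // The same image name under another project no longer collides:
        System.out.println(dao.getImage("alice", "my-image", "projectB", "local").isPresent()); // false
    }
}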
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
index 866808d..dd370dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
@@ -22,9 +22,9 @@
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.dao.EnvDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.service.ComputationalService;
-import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.InactivityService;
 import com.epam.dlab.backendapi.service.SecurityService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
@@ -58,11 +58,9 @@
 	@Inject
 	private RequestId requestId;
 	@Inject
-	private ComputationalService computationalService;
-	@Inject
-	private ExploratoryService exploratoryService;
-	@Inject
 	private SecurityService securityService;
+	@Inject
+	private EndpointService endpointService;
 
 	@Override
 	public void updateRunningResourcesLastActivity() {
@@ -77,9 +75,9 @@
 	}
 
 	@Override
-	public void updateLastActivityForComputational(UserInfo userInfo, String exploratoryName,
+	public void updateLastActivityForComputational(UserInfo userInfo, String project, String exploratoryName,
 												   String computationalName, LocalDateTime lastActivity) {
-		computationalDAO.updateLastActivity(userInfo.getName(), exploratoryName, computationalName, lastActivity);
+		computationalDAO.updateLastActivity(userInfo.getName(), project, exploratoryName, computationalName, lastActivity);
 	}
 
 	private void updateLastActivity(UserInstanceDTO ui) {
@@ -93,19 +91,20 @@
 	}
 
 	private void updateComputationalLastActivity(UserInfo userInfo, UserInstanceDTO ui, UserComputationalResource cr) {
-		final ComputationalCheckInactivityDTO dto =
-				requestBuilder.newComputationalCheckInactivity(userInfo, ui, cr);
+		EndpointDTO endpointDTO = endpointService.get(ui.getEndpoint());
+		final ComputationalCheckInactivityDTO dto = requestBuilder.newComputationalCheckInactivity(userInfo, ui, cr, endpointDTO);
 		final String uuid =
-				provisioningService.post(InfrasctructureAPI.COMPUTATIONAL_CHECK_INACTIVITY,
+				provisioningService.post(endpointDTO.getUrl() + InfrasctructureAPI.COMPUTATIONAL_CHECK_INACTIVITY,
 						userInfo.getAccessToken(), dto, String.class);
 		requestId.put(userInfo.getName(), uuid);
 	}
 
 	private void updateExploratoryLastActivity(UserInfo userInfo, UserInstanceDTO ui) {
+		EndpointDTO endpointDTO = endpointService.get(ui.getEndpoint());
 		final ExploratoryCheckInactivityAction dto =
-				requestBuilder.newExploratoryCheckInactivityAction(userInfo, ui);
+				requestBuilder.newExploratoryCheckInactivityAction(userInfo, ui, endpointDTO);
 		final String uuid =
-				provisioningService.post(InfrasctructureAPI.EXPLORATORY_CHECK_INACTIVITY,
+				provisioningService.post(endpointDTO.getUrl() + InfrasctructureAPI.EXPLORATORY_CHECK_INACTIVITY,
 						userInfo.getAccessToken(), dto, String.class);
 		requestId.put(userInfo.getName(), uuid);
 	}
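
With multiple endpoints, the inactivity probes above can no longer POST to a single fixed provisioning URL; each request is routed to endpointDTO.getUrl() plus the API path, targeting the endpoint that hosts the resource. A simplified routing sketch, not part of the patch (the path string is illustrative, not the real API constant):

import java.util.HashMap;
import java.util.Map;

final class InactivityRoutingSketch {
    static String checkUrl(Map<String, String> endpointUrls, String endpointName, String apiPath) {
        String base = endpointUrls.get(endpointName);
        if (base == null) {
            throw new IllegalArgumentException("Unknown endpoint: " + endpointName);
        }
        return base + apiPath;
    }

    public static void main(String[] args) {
        Map<String, String> urls = new HashMap<>();
        urls.put("local", "https://endpoint-a.example");
        urls.put("remote", "https://endpoint-b.example");
        // Each resource's probe targets the endpoint that hosts it.
        System.out.println(checkUrl(urls, "remote", "/check_inactivity"));
    }
}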
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceBase.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceBase.java
deleted file mode 100644
index 751d399..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceBase.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.impl;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.EnvDAO;
-import com.epam.dlab.backendapi.dao.ExploratoryDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
-import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
-import com.epam.dlab.backendapi.resources.dto.ProjectInfrastructureInfo;
-import com.epam.dlab.backendapi.service.InfrastructureInfoService;
-import com.epam.dlab.backendapi.service.ProjectService;
-import com.epam.dlab.dto.InfrastructureMetaInfoDTO;
-import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.exceptions.DlabException;
-import com.google.inject.Inject;
-import com.jcabi.manifests.Manifests;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import java.util.stream.StreamSupport;
-
-@Slf4j
-public abstract class InfrastructureInfoServiceBase<T> implements InfrastructureInfoService {
-
-	private static final String RELEASE_NOTES_FORMAT = "https://github.com/apache/incubator-dlab/blob/%s" +
-			"/RELEASE_NOTES.md";
-	@Inject
-	private ExploratoryDAO expDAO;
-	@Inject
-	private KeyDAO keyDAO;
-	@Inject
-	private EnvDAO envDAO;
-	@Inject
-	private SelfServiceApplicationConfiguration configuration;
-	@Inject
-	private BillingDAO billingDAO;
-	@Inject
-	private ProjectService projectService;
-
-
-	@SuppressWarnings("unchecked")
-	private Map<String, String> getSharedInfo(EdgeInfo edgeInfo) {
-		return getSharedInfo((T) edgeInfo);
-	}
-
-	@Override
-	public List<ProjectInfrastructureInfo> getUserResources(String user) {
-		log.debug("Loading list of provisioned resources for user {}", user);
-		try {
-			Iterable<Document> documents = expDAO.findExploratory(user);
-
-			return StreamSupport.stream(documents.spliterator(),
-					false)
-					.collect(Collectors.groupingBy(d -> d.getString("project")))
-					.entrySet()
-					.stream()
-					.map(e -> {
-
-						final Map<String, Map<String, String>> projectEdges =
-								projectService.get(e.getKey()).getEndpoints().stream()
-										.collect(Collectors.toMap(ProjectEndpointDTO::getName,
-												endpointDTO -> getSharedInfo(endpointDTO.getEdgeInfo())));
-						return new ProjectInfrastructureInfo(e.getKey(),
-								billingDAO.getBillingProjectQuoteUsed(e.getKey()),
-								projectEdges, e.getValue());
-					})
-					.collect(Collectors.toList());
-		} catch (Exception e) {
-			log.error("Could not load list of provisioned resources for user: {}", user, e);
-			throw new DlabException("Could not load list of provisioned resources for user: ");
-		}
-	}
-
-	@Override
-	public HealthStatusPageDTO getHeathStatus(UserInfo userInfo, boolean fullReport, boolean isAdmin) {
-		final String user = userInfo.getName();
-		log.debug("Request the status of resources for user {}, report type {}", user, fullReport);
-		try {
-
-			return envDAO.getHealthStatusPageDTO(user, fullReport)
-					.withBillingEnabled(configuration.isBillingSchedulerEnabled())
-					.withAdmin(isAdmin)
-					.withProjectAssinged(projectService.isAnyProjectAssigned(userInfo))
-					.withBillingQuoteUsed(billingDAO.getBillingQuoteUsed())
-					.withBillingUserQuoteUsed(billingDAO.getBillingUserQuoteUsed(user));
-		} catch (Exception e) {
-			log.warn("Could not return status of resources for user {}: {}", user, e.getLocalizedMessage(), e);
-			throw new DlabException(e.getMessage(), e);
-		}
-	}
-
-	@Override
-	public InfrastructureMetaInfoDTO getInfrastructureMetaInfo() {
-		final String branch = Manifests.read("GIT-Branch");
-		return InfrastructureMetaInfoDTO.builder()
-				.branch(branch)
-				.commit(Manifests.read("GIT-Commit"))
-				.version(Manifests.read("DLab-Version"))
-				.releaseNotes(String.format(RELEASE_NOTES_FORMAT, branch))
-				.build();
-	}
-
-	protected abstract Map<String, String> getSharedInfo(T sharedInfo);
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceImpl.java
new file mode 100644
index 0000000..b1eac51
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceImpl.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.service.impl;
+
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
+import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.dao.ExploratoryDAO;
+import com.epam.dlab.backendapi.domain.BillingReport;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
+import com.epam.dlab.backendapi.resources.dto.HealthStatusEnum;
+import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
+import com.epam.dlab.backendapi.resources.dto.ProjectInfrastructureInfo;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.backendapi.service.BillingService;
+import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.InfrastructureInfoService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.dto.InfrastructureMetaInfoDTO;
+import com.epam.dlab.dto.aws.edge.EdgeInfoAws;
+import com.epam.dlab.dto.azure.edge.EdgeInfoAzure;
+import com.epam.dlab.dto.base.edge.EdgeInfo;
+import com.epam.dlab.dto.gcp.edge.EdgeInfoGcp;
+import com.epam.dlab.exceptions.DlabException;
+import com.google.inject.Inject;
+import com.jcabi.manifests.Manifests;
+import lombok.extern.slf4j.Slf4j;
+import org.bson.Document;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+@Slf4j
+public class InfrastructureInfoServiceImpl implements InfrastructureInfoService {
+
+	private static final String RELEASE_NOTES_FORMAT = "https://github.com/apache/incubator-dlab/blob/%s" +
+			"/RELEASE_NOTES.md";
+	private final ExploratoryDAO expDAO;
+	private final SelfServiceApplicationConfiguration configuration;
+	private final BillingDAO billingDAO;
+	private final ProjectService projectService;
+	private final EndpointService endpointService;
+	private final BillingService billingService;
+
+	@Inject
+	public InfrastructureInfoServiceImpl(ExploratoryDAO expDAO, SelfServiceApplicationConfiguration configuration,
+										 BillingDAO billingDAO, ProjectService projectService, EndpointService endpointService,
+										 BillingService billingService) {
+		this.expDAO = expDAO;
+		this.configuration = configuration;
+		this.billingDAO = billingDAO;
+		this.projectService = projectService;
+		this.endpointService = endpointService;
+		this.billingService = billingService;
+	}
+
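+	// Builds a per-project view of the user's resources: notebooks grouped by project,
+	// their billing data, and the shared edge info for every project endpoint.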
+	@Override
+	public List<ProjectInfrastructureInfo> getUserResources(UserInfo user) {
+		log.debug("Loading list of provisioned resources for user {}", user);
+		try {
+			Iterable<Document> documents = expDAO.findExploratory(user.getName());
+			List<EndpointDTO> allEndpoints = endpointService.getEndpoints();
+			return StreamSupport.stream(documents.spliterator(), false)
+					.collect(Collectors.groupingBy(d -> d.getString("project")))
+					.entrySet()
+					.stream()
+					.map(e -> {
+						List<ProjectEndpointDTO> endpoints = projectService.get(e.getKey()).getEndpoints();
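+						// Keep only the registered endpoints that actually belong to this project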
+						List<EndpointDTO> endpointResult = allEndpoints.stream()
+								.filter(endpoint -> endpoints.stream()
+										.anyMatch(endpoint1 -> endpoint1.getName().equals(endpoint.getName())))
+								.collect(Collectors.toList());
+
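+						// Collect billing data for each notebook together with its computational resources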
+						List<BillingReport> billingData = e.getValue()
+								.stream()
+								.map(exp ->
+										billingService.getExploratoryBillingData(exp.getString("project"), exp.getString("endpoint"),
+												exp.getString("exploratory_name"),
+												Optional.ofNullable(exp.get("computational_resources"))
+														.map(cr -> (List<Document>) cr)
+														.orElseGet(Collections::emptyList)
+														.stream()
+														.map(cr -> cr.getString("computational_name"))
+														.collect(Collectors.toList()))
+								)
+								.collect(Collectors.toList());
+
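+						// Shared edge-node info (status, IP, storage names) keyed by endpoint name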
+						final Map<String, Map<String, String>> projectEdges =
+								endpoints
+										.stream()
+										.collect(Collectors.toMap(ProjectEndpointDTO::getName, this::getSharedInfo));
+						return new ProjectInfrastructureInfo(e.getKey(), billingDAO.getBillingProjectQuoteUsed(e.getKey()),
+								projectEdges, e.getValue(), billingData, endpointResult);
+					})
+					.collect(Collectors.toList());
+		} catch (Exception e) {
+			log.error("Could not load list of provisioned resources for user {}", user.getName(), e);
+			throw new DlabException("Could not load list of provisioned resources for user " + user.getName());
+		}
+	}
+
+	@Override
+	public HealthStatusPageDTO getHeathStatus(UserInfo userInfo, boolean fullReport) {
+		final String user = userInfo.getName();
+		log.debug("Request the status of resources for user {}, report type {}", user, fullReport);
+		try {
+			return HealthStatusPageDTO.builder()
+					.status(HealthStatusEnum.OK.toString())
+					.listResources(Collections.emptyList())
+					.billingEnabled(configuration.isBillingSchedulerEnabled())
+					.projectAdmin(UserRoles.isProjectAdmin(userInfo))
+					.admin(UserRoles.isAdmin(userInfo))
+					.projectAssigned(projectService.isAnyProjectAssigned(userInfo))
+					.billingQuoteUsed(billingDAO.getBillingQuoteUsed())
+					.billingUserQuoteUsed(billingDAO.getBillingUserQuoteUsed(user))
+					.build();
+		} catch (Exception e) {
+			log.warn("Could not return status of resources for user {}: {}", user, e.getLocalizedMessage(), e);
+			throw new DlabException(e.getMessage(), e);
+		}
+	}
+
+	@Override
+	public InfrastructureMetaInfoDTO getInfrastructureMetaInfo() {
+		final String branch = Manifests.read("GIT-Branch");
+		return InfrastructureMetaInfoDTO.builder()
+				.branch(branch)
+				.commit(Manifests.read("GIT-Commit"))
+				.version(Manifests.read("DLab-Version"))
+				.releaseNotes(String.format(RELEASE_NOTES_FORMAT, branch))
+				.build();
+	}
+
+	private Map<String, String> getSharedInfo(ProjectEndpointDTO endpointDTO) {
+		Optional<EdgeInfo> edgeInfo = Optional.ofNullable(endpointDTO.getEdgeInfo());
+		if (!edgeInfo.isPresent()) {
+			return Collections.emptyMap();
+		}
+		EdgeInfo edge = edgeInfo.get();
+		Map<String, String> shared = new HashMap<>();
+
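+		// Fields common to every cloud provider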
+		shared.put("status", endpointDTO.getStatus().toString());
+		shared.put("edge_node_ip", edge.getPublicIp());
+		if (edge instanceof EdgeInfoAws) {
+			EdgeInfoAws edgeInfoAws = (EdgeInfoAws) edge;
+			shared.put("user_own_bicket_name", edgeInfoAws.getUserOwnBucketName());
+			shared.put("shared_bucket_name", edgeInfoAws.getSharedBucketName());
+		} else if (edge instanceof EdgeInfoAzure) {
+			EdgeInfoAzure edgeInfoAzure = (EdgeInfoAzure) edge;
+			shared.put("user_container_name", edgeInfoAzure.getUserContainerName());
+			shared.put("shared_container_name", edgeInfoAzure.getSharedContainerName());
+			shared.put("user_storage_account_name", edgeInfoAzure.getUserStorageAccountName());
+			shared.put("shared_storage_account_name", edgeInfoAzure.getSharedStorageAccountName());
+			shared.put("datalake_name", edgeInfoAzure.getDataLakeName());
+			shared.put("datalake_user_directory_name", edgeInfoAzure.getDataLakeDirectoryName());
+			shared.put("datalake_shared_directory_name", edgeInfoAzure.getDataLakeSharedDirectoryName());
+		} else if (edge instanceof EdgeInfoGcp) {
+			EdgeInfoGcp edgeInfoGcp = (EdgeInfoGcp) edge;
+			shared.put("user_own_bucket_name", edgeInfoGcp.getUserOwnBucketName());
+			shared.put("shared_bucket_name", edgeInfoGcp.getSharedBucketName());
+		}
+
+		return shared;
+	}
+}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceBase.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceImpl.java
similarity index 65%
rename from services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceBase.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceImpl.java
index c3268f1..31f73f3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceBase.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceImpl.java
@@ -23,8 +23,11 @@
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.dao.SettingsDAO;
-import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.dao.UserGroupDao;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneConfiguration;
+import com.epam.dlab.backendapi.resources.dto.aws.AwsEmrConfiguration;
+import com.epam.dlab.backendapi.resources.dto.gcp.GcpDataprocConfiguration;
 import com.epam.dlab.backendapi.roles.RoleType;
 import com.epam.dlab.backendapi.roles.UserRoles;
 import com.epam.dlab.backendapi.service.EndpointService;
@@ -53,17 +56,18 @@
 import static com.epam.dlab.rest.contracts.DockerAPI.DOCKER_EXPLORATORY;
 
 @Slf4j
-public abstract class InfrastructureTemplateServiceBase implements InfrastructureTemplateService {
+public class InfrastructureTemplateServiceImpl implements InfrastructureTemplateService {
 
 	@Inject
 	private SelfServiceApplicationConfiguration configuration;
-
 	@Inject
 	private SettingsDAO settingsDAO;
 	@Inject
 	private ProjectDAO projectDAO;
 	@Inject
 	private EndpointService endpointService;
+	@Inject
+	private UserGroupDao userGroupDao;
 
 
 	@Inject
@@ -75,15 +79,16 @@
 
 		log.debug("Loading list of exploratory templates for user {} for project {}", user.getName(), project);
 		try {
+			EndpointDTO endpointDTO = endpointService.get(endpoint);
 			ExploratoryMetadataDTO[] array =
-					provisioningService.get(endpointService.get(endpoint).getUrl() + DOCKER_EXPLORATORY,
+					provisioningService.get(endpointDTO.getUrl() + DOCKER_EXPLORATORY,
 							user.getAccessToken(),
 							ExploratoryMetadataDTO[].class);
 
-			final Set<String> roles = getRoles(user, project);
+			final Set<String> roles = userGroupDao.getUserGroups(user.getName());
 			return Arrays.stream(array)
 					.peek(e -> e.setImage(getSimpleImageName(e.getImage())))
-					.filter(e -> exploratoryGpuIssuesAzureFilter(e) &&
+					.filter(e -> exploratoryGpuIssuesAzureFilter(e, endpointDTO.getCloudProvider()) &&
 							UserRoles.checkAccess(user, RoleType.EXPLORATORY, e.getImage(), roles))
 					.peek(e -> filterShapes(user, e.getExploratoryEnvironmentShapes(), RoleType.EXPLORATORY_SHAPES,
 							roles))
@@ -114,19 +119,20 @@
 
 		log.debug("Loading list of computational templates for user {}", user.getName());
 		try {
+			EndpointDTO endpointDTO = endpointService.get(endpoint);
 			ComputationalMetadataDTO[] array =
-					provisioningService.get(endpointService.get(endpoint).getUrl() + DOCKER_COMPUTATIONAL,
+					provisioningService.get(endpointDTO.getUrl() + DOCKER_COMPUTATIONAL,
 							user.getAccessToken(), ComputationalMetadataDTO[]
 									.class);
 
-			final Set<String> roles = getRoles(user, project);
+			final Set<String> roles = userGroupDao.getUserGroups(user.getName());
 
 			return Arrays.stream(array)
 					.peek(e -> e.setImage(getSimpleImageName(e.getImage())))
 					.peek(e -> filterShapes(user, e.getComputationResourceShapes(), RoleType.COMPUTATIONAL_SHAPES,
 							user.getRoles()))
 					.filter(e -> UserRoles.checkAccess(user, RoleType.COMPUTATIONAL, e.getImage(), roles))
-					.map(this::fullComputationalTemplate)
+					.map(comp -> fullComputationalTemplate(comp, endpointDTO.getCloudProvider()))
 					.collect(Collectors.toList());
 
 		} catch (DlabException e) {
@@ -135,22 +141,12 @@
 		}
 	}
 
-	private Set<String> getRoles(UserInfo user, String project) {
-		return projectDAO.get(project)
-				.map(ProjectDTO::getGroups)
-				.orElse(user.getRoles());
-	}
-
-	protected abstract FullComputationalTemplate getCloudFullComputationalTemplate(ComputationalMetadataDTO
-																						   metadataDTO);
-
 	/**
 	 * Temporary filter for creation of exploratory env due to Azure issues
 	 */
-	private boolean exploratoryGpuIssuesAzureFilter(ExploratoryMetadataDTO e) {
-		return (!"redhat".equals(settingsDAO.getConfOsFamily()) || configuration.getCloudProvider() != CloudProvider
-				.AZURE)
-				|| !(e.getImage().endsWith("deeplearning") || e.getImage().endsWith("tensor"));
+	private boolean exploratoryGpuIssuesAzureFilter(ExploratoryMetadataDTO e, CloudProvider cloudProvider) {
+		return (!"redhat".equals(settingsDAO.getConfOsFamily()) || cloudProvider != CloudProvider.AZURE) ||
+				!(e.getImage().endsWith("deeplearning") || e.getImage().endsWith("tensor"));
 	}
 
 	/**
@@ -166,16 +162,18 @@
 	/**
 	 * Wraps metadata with limits
 	 *
-	 * @param metadataDTO metadata
+	 * @param metadataDTO   metadata
+	 * @param cloudProvider cloudProvider
 	 * @return wrapped object
 	 */
 
-	private FullComputationalTemplate fullComputationalTemplate(ComputationalMetadataDTO metadataDTO) {
+	private FullComputationalTemplate fullComputationalTemplate(ComputationalMetadataDTO metadataDTO,
+																CloudProvider cloudProvider) {
 
 		DataEngineType dataEngineType = DataEngineType.fromDockerImageName(metadataDTO.getImage());
 
 		if (dataEngineType == DataEngineType.CLOUD_SERVICE) {
-			return getCloudFullComputationalTemplate(metadataDTO);
+			return getCloudFullComputationalTemplate(metadataDTO, cloudProvider);
 		} else if (dataEngineType == DataEngineType.SPARK_STANDALONE) {
 			return new SparkFullComputationalTemplate(metadataDTO,
 					SparkStandaloneConfiguration.builder()
@@ -187,6 +185,53 @@
 		}
 	}
 
+	protected FullComputationalTemplate getCloudFullComputationalTemplate(ComputationalMetadataDTO metadataDTO,
+																		CloudProvider cloudProvider) {
+		switch (cloudProvider) {
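+			// Cloud-specific limits: EMR instance counts/spot bids for AWS, Dataproc counts for GCP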
+			case AWS:
+				return new AwsFullComputationalTemplate(metadataDTO,
+						AwsEmrConfiguration.builder()
+								.minEmrInstanceCount(configuration.getMinEmrInstanceCount())
+								.maxEmrInstanceCount(configuration.getMaxEmrInstanceCount())
+								.maxEmrSpotInstanceBidPct(configuration.getMaxEmrSpotInstanceBidPct())
+								.minEmrSpotInstanceBidPct(configuration.getMinEmrSpotInstanceBidPct())
+								.build());
+			case GCP:
+				return new GcpFullComputationalTemplate(metadataDTO,
+						GcpDataprocConfiguration.builder()
+								.minInstanceCount(configuration.getMinInstanceCount())
+								.maxInstanceCount(configuration.getMaxInstanceCount())
+								.minDataprocPreemptibleInstanceCount(configuration.getMinDataprocPreemptibleCount())
+								.build());
+			case AZURE:
+				log.error("Dataengine service is not supported currently for {}", cloudProvider);
+			default:
+				throw new UnsupportedOperationException("Dataengine service is not supported currently for " + cloudProvider);
+		}
+	}
+
+	private class AwsFullComputationalTemplate extends FullComputationalTemplate {
+		@JsonProperty("limits")
+		private AwsEmrConfiguration awsEmrConfiguration;
+
+		AwsFullComputationalTemplate(ComputationalMetadataDTO metadataDTO,
+									 AwsEmrConfiguration awsEmrConfiguration) {
+			super(metadataDTO);
+			this.awsEmrConfiguration = awsEmrConfiguration;
+		}
+	}
+
+	private class GcpFullComputationalTemplate extends FullComputationalTemplate {
+		@JsonProperty("limits")
+		private GcpDataprocConfiguration gcpDataprocConfiguration;
+
+		GcpFullComputationalTemplate(ComputationalMetadataDTO metadataDTO,
+									 GcpDataprocConfiguration gcpDataprocConfiguration) {
+			super(metadataDTO);
+			this.gcpDataprocConfiguration = gcpDataprocConfiguration;
+		}
+	}
+
 	private class SparkFullComputationalTemplate extends FullComputationalTemplate {
 		@JsonProperty("limits")
 		private SparkStandaloneConfiguration sparkStandaloneConfiguration;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
index afadbf6..3fbb170 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
@@ -23,6 +23,7 @@
 import com.epam.dlab.backendapi.dao.BaseDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryLibDAO;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.resources.dto.LibInfoRecord;
 import com.epam.dlab.backendapi.resources.dto.LibKey;
@@ -49,7 +50,13 @@
 import org.apache.commons.lang3.StringUtils;
 import org.bson.Document;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeMap;
 import java.util.stream.Collectors;
 
 @Slf4j
@@ -79,13 +86,13 @@
 
 	@Override
 	@SuppressWarnings("unchecked")
-	public List<Document> getLibs(String user, String exploratoryName, String computationalName) {
+	public List<Document> getLibs(String user, String project, String exploratoryName, String computationalName) {
 		if (StringUtils.isEmpty(computationalName)) {
-			return (List<Document>) libraryDAO.findExploratoryLibraries(user, exploratoryName)
+			return (List<Document>) libraryDAO.findExploratoryLibraries(user, project, exploratoryName)
 					.getOrDefault(ExploratoryLibDAO.EXPLORATORY_LIBS, new ArrayList<>());
 		} else {
-			Document document = (Document) libraryDAO.findComputationalLibraries(user, exploratoryName,
-					computationalName)
+			Document document = (Document) libraryDAO.findComputationalLibraries(user, project,
+					exploratoryName, computationalName)
 					.getOrDefault(ExploratoryLibDAO.COMPUTATIONAL_LIBS, new Document());
 
 			return (List<Document>) document.getOrDefault(computationalName, new ArrayList<>());
@@ -94,8 +101,8 @@
 
 	@Override
 	@SuppressWarnings("unchecked")
-	public List<LibInfoRecord> getLibInfo(String user, String exploratoryName) {
-		Document document = libraryDAO.findAllLibraries(user, exploratoryName);
+	public List<LibInfoRecord> getLibInfo(String user, String project, String exploratoryName) {
+		Document document = libraryDAO.findAllLibraries(user, project, exploratoryName);
 
 		Map<LibKey, List<LibraryStatus>> model = new TreeMap<>(Comparator.comparing(LibKey::getName)
 				.thenComparing(LibKey::getVersion)
@@ -123,51 +130,54 @@
 	}
 
 	@Override
-	public String installComputationalLibs(UserInfo ui, String expName, String compName,
+	public String installComputationalLibs(UserInfo ui, String project, String expName, String compName,
 										   List<LibInstallDTO> libs) {
 
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(ui.getName(), expName, compName);
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(ui.getName(), project, expName, compName);
+		EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
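+		// Resolve the endpoint once and reuse it for both the provisioning URL and the request DTO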
 		final String uuid =
-				provisioningService.post(endpointService.get(userInstance.getEndpoint()).getUrl() + ComputationalAPI.COMPUTATIONAL_LIB_INSTALL,
-						ui.getAccessToken(), toComputationalLibraryInstallDto(ui, expName, compName, libs,
-								userInstance),
+				provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_LIB_INSTALL,
+						ui.getAccessToken(),
+						toComputationalLibraryInstallDto(ui, project, expName, compName, libs, userInstance, endpointDTO),
 						String.class);
 		requestId.put(ui.getName(), uuid);
 		return uuid;
 	}
 
 	@Override
-	public String installExploratoryLibs(UserInfo ui, String expName, List<LibInstallDTO> libs) {
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(ui.getName(), expName);
+	public String installExploratoryLibs(UserInfo ui, String project, String expName, List<LibInstallDTO> libs) {
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(ui.getName(), project, expName);
+		EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 		final String uuid =
-				provisioningService.post(endpointService.get(userInstance.getEndpoint()).getUrl() + ExploratoryAPI.EXPLORATORY_LIB_INSTALL, ui.getAccessToken(),
-				toExploratoryLibraryInstallDto(ui, expName, libs, userInstance), String.class);
+				provisioningService.post(endpointDTO.getUrl() + ExploratoryAPI.EXPLORATORY_LIB_INSTALL,
+						ui.getAccessToken(), toExploratoryLibraryInstallDto(ui, project, expName, libs, userInstance, endpointDTO),
+						String.class);
 		requestId.put(ui.getName(), uuid);
 		return uuid;
 	}
 
-	private LibraryInstallDTO toExploratoryLibraryInstallDto(UserInfo userInfo, String exploratoryName,
-															 List<LibInstallDTO> libs, UserInstanceDTO userInstance) {
+	private LibraryInstallDTO toExploratoryLibraryInstallDto(UserInfo userInfo, String project, String exploratoryName,
+															 List<LibInstallDTO> libs, UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
 		final List<LibInstallDTO> libsToInstall = libs.stream()
-				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), exploratoryName,
+				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), project, exploratoryName,
 						lib.getGroup(), lib.getName())))
-				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), exploratoryName, l, l.isOverride()))
+				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), project, exploratoryName, l, l.isOverride()))
 				.collect(Collectors.toList());
-		return requestBuilder.newLibInstall(userInfo, userInstance, libsToInstall);
+		return requestBuilder.newLibInstall(userInfo, userInstance, endpointDTO, libsToInstall);
 	}
 
-	private LibraryInstallDTO toComputationalLibraryInstallDto(UserInfo userInfo, String expName, String compName,
-															   List<LibInstallDTO> libs,
-															   UserInstanceDTO userInstance) {
+	private LibraryInstallDTO toComputationalLibraryInstallDto(UserInfo userInfo, String project, String expName,
+															   String compName, List<LibInstallDTO> libs,
+															   UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
 
 		final UserComputationalResource computationalResource = getComputationalResource(compName, userInstance);
 		final List<LibInstallDTO> libsToInstall = libs.stream()
-				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), expName,
-						compName, lib.getGroup(), lib.getName())))
-				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), expName, compName, l,
-						l.isOverride()))
+				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), project,
+						expName, compName, lib.getGroup(), lib.getName())))
+				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), project, expName, compName,
+						l, l.isOverride()))
 				.collect(Collectors.toList());
-		return requestBuilder.newLibInstall(userInfo, userInstance, computationalResource, libsToInstall);
+		return requestBuilder.newLibInstall(userInfo, userInstance, computationalResource, libsToInstall, endpointDTO);
 	}
 
 	private UserComputationalResource getComputationalResource(String computationalName,
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
index 5e8704a..11d4b62 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
@@ -22,21 +22,23 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.annotation.BudgetLimited;
 import com.epam.dlab.backendapi.annotation.Project;
+import com.epam.dlab.backendapi.annotation.ProjectAdmin;
+import com.epam.dlab.backendapi.annotation.User;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.dao.UserGroupDao;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
-import com.epam.dlab.backendapi.domain.ProjectManagingDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.domain.UpdateProjectDTO;
+import com.epam.dlab.backendapi.roles.UserRoles;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.ExploratoryService;
 import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.SecurityService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
 import com.epam.dlab.constants.ServiceConsts;
-import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.exceptions.ResourceConflictException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
@@ -46,7 +48,6 @@
 import lombok.extern.slf4j.Slf4j;
 
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -99,12 +100,10 @@
 	}
 
 	@Override
-	public List<ProjectManagingDTO> getProjectsForManaging() {
-		return projectDAO.getProjects().stream().map(p -> new ProjectManagingDTO(
-				p.getName(), p.getBudget(), !exploratoryDAO.fetchProjectExploratoriesWhereStatusIn(p.getName(),
-				Collections.singletonList(UserInstanceStatus.RUNNING), UserInstanceStatus.RUNNING).isEmpty(),
-				!p.getEndpoints().stream().allMatch(e -> Arrays.asList(UserInstanceStatus.STARTING,
-						UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATING).contains(e.getStatus()))))
+	public List<ProjectDTO> getProjects(UserInfo user) {
+		return projectDAO.getProjects()
+				.stream()
+				.filter(project -> UserRoles.isProjectAdmin(user, project.getGroups()) || UserRoles.isAdmin(user))
 				.collect(Collectors.toList());
 	}
 
@@ -114,8 +113,8 @@
 	}
 
 	@Override
-	public List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status) {
-		return projectDAO.getProjectsWithStatus(status);
+	public List<ProjectDTO> getProjectsByEndpoint(String endpointName) {
+		return projectDAO.getProjectsByEndpoint(endpointName);
 	}
 
 	@BudgetLimited
@@ -142,13 +141,11 @@
 		exploratoryService.updateProjectExploratoryStatuses(name, endpoint, UserInstanceStatus.TERMINATING);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void terminateProject(UserInfo userInfo, String name) {
-		checkProjectRelatedResourcesInProgress(name, TERMINATE_ACTION);
-		get(name).getEndpoints()
-				.stream()
-				.map(ProjectEndpointDTO::getName)
-				.forEach(endpoint -> terminateEndpoint(userInfo, endpoint, name));
+	public void terminateEndpoint(@User UserInfo userInfo, List<String> endpoints, @Project String name) {
+		System.out.println("sd");
+		endpoints.forEach(endpoint -> terminateEndpoint(userInfo, endpoint, name));
 	}
 
 	@BudgetLimited
@@ -158,24 +155,43 @@
 		projectDAO.updateEdgeStatus(name, endpoint, UserInstanceStatus.STARTING);
 	}
 
+	@ProjectAdmin
+	@Override
+	public void start(@User UserInfo userInfo, List<String> endpoints, @Project String name) {
+		endpoints.forEach(endpoint -> start(userInfo, endpoint, name));
+	}
+
 	@Override
 	public void stop(UserInfo userInfo, String endpoint, String name) {
 		projectActionOnCloud(userInfo, name, STOP_PRJ_API, endpoint);
 		projectDAO.updateEdgeStatus(name, endpoint, UserInstanceStatus.STOPPING);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void stopWithResources(UserInfo userInfo, String projectName) {
-		ProjectDTO project = get(projectName);
-		checkProjectRelatedResourcesInProgress(projectName, STOP_ACTION);
-		exploratoryDAO.fetchRunningExploratoryFieldsForProject(projectName).forEach(this::stopNotebook);
-		project.getEndpoints().stream().filter(e -> !Arrays.asList(UserInstanceStatus.TERMINATED,
-				UserInstanceStatus.TERMINATING).contains(e.getStatus())).
-				forEach(e -> stop(userInfo, e.getName(), projectName));
+	public void stopWithResources(@User UserInfo userInfo, List<String> endpoints, @Project String projectName) {
+		List<ProjectEndpointDTO> endpointDTOs = get(projectName)
+				.getEndpoints()
+				.stream()
+				.filter(projectEndpointDTO -> endpoints.contains(projectEndpointDTO.getName()))
+				.collect(Collectors.toList());
+		checkProjectRelatedResourcesInProgress(projectName, endpointDTOs, STOP_ACTION);
+
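+		// Stop every running notebook on the selected endpoints before stopping the edge nodes themselves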
+		exploratoryDAO.fetchRunningExploratoryFieldsForProject(projectName,
+				endpointDTOs
+						.stream()
+						.map(ProjectEndpointDTO::getName)
+						.collect(Collectors.toList()))
+				.forEach(e -> exploratoryService.stop(new UserInfo(e.getUser(), userInfo.getAccessToken()), projectName, e.getExploratoryName()));
+
+		endpointDTOs.stream().filter(e -> !Arrays.asList(UserInstanceStatus.TERMINATED,
+				UserInstanceStatus.TERMINATING, UserInstanceStatus.STOPPED, UserInstanceStatus.FAILED).contains(e.getStatus()))
+				.forEach(e -> stop(userInfo, e.getName(), projectName));
 	}
 
+	@ProjectAdmin
 	@Override
-	public void update(UserInfo userInfo, UpdateProjectDTO projectDTO) {
+	public void update(@User UserInfo userInfo, UpdateProjectDTO projectDTO, @Project String projectName) {
 		final ProjectDTO project = projectDAO.get(projectDTO.getName()).orElseThrow(projectNotFound());
 		final Set<String> endpoints = project.getEndpoints()
 				.stream()
@@ -188,16 +204,11 @@
 				.collect(Collectors.toList());
 		project.getEndpoints().addAll(endpointsToBeCreated);
 		projectDAO.update(new ProjectDTO(project.getName(), projectDTO.getGroups(), project.getKey(),
-				project.getTag(), project.getBudget(), project.getEndpoints()));
+				project.getTag(), project.getBudget(), project.getEndpoints(), projectDTO.isSharedImageEnabled()));
 		endpointsToBeCreated.forEach(e -> createEndpoint(userInfo, project, e.getName()));
 	}
 
 	@Override
-	public void updateBudget(String project, Integer budget) {
-		projectDAO.updateBudget(project, budget);
-	}
-
-	@Override
 	public void updateBudget(List<ProjectDTO> projects) {
 		projects.forEach(p -> projectDAO.updateBudget(p.getName(), p.getBudget()));
 	}
@@ -210,6 +221,17 @@
 		return projectDAO.isAnyProjectAssigned(userGroups);
 	}
 
+	@Override
+	public boolean checkExploratoriesAndComputationalProgress(String projectName, List<String> endpoints) {
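+		// True when no notebook or cluster on the given endpoints is in a transitional status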
+		return exploratoryDAO.fetchProjectEndpointExploratoriesWhereStatusIn(projectName, endpoints, Arrays.asList(
+				UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE,
+				UserInstanceStatus.CONFIGURING, UserInstanceStatus.RECONFIGURING, UserInstanceStatus.STOPPING,
+				UserInstanceStatus.TERMINATING),
+				UserInstanceStatus.CREATING, UserInstanceStatus.CONFIGURING, UserInstanceStatus.STARTING,
+				UserInstanceStatus.RECONFIGURING, UserInstanceStatus.CREATING_IMAGE, UserInstanceStatus.STOPPING,
+				UserInstanceStatus.TERMINATING).isEmpty();
+	}
+
 	private void createProjectOnCloud(UserInfo user, ProjectDTO projectDTO) {
 		try {
 			projectDTO.getEndpoints().forEach(endpoint -> createEndpoint(user, projectDTO,
@@ -221,19 +243,17 @@
 	}
 
 	private void createEndpoint(UserInfo user, ProjectDTO projectDTO, String endpointName) {
-		String uuid =
-				provisioningService.post(endpointService.get(endpointName).getUrl() + CREATE_PRJ_API,
-						user.getAccessToken(),
-						requestBuilder.newProjectCreate(user, projectDTO, endpointName), String.class);
+		EndpointDTO endpointDTO = endpointService.get(endpointName);
+		String uuid = provisioningService.post(endpointDTO.getUrl() + CREATE_PRJ_API, user.getAccessToken(),
+						requestBuilder.newProjectCreate(user, projectDTO, endpointDTO), String.class);
 		requestId.put(user.getName(), uuid);
 	}
 
-
 	private void projectActionOnCloud(UserInfo user, String projectName, String provisioningApiUri, String endpoint) {
 		try {
-			String uuid = provisioningService.post(endpointService.get(endpoint).getUrl() + provisioningApiUri,
-					user.getAccessToken(),
-					requestBuilder.newProjectAction(user, projectName, endpoint), String.class);
+			EndpointDTO endpointDTO = endpointService.get(endpoint);
+			String uuid = provisioningService.post(endpointDTO.getUrl() + provisioningApiUri, user.getAccessToken(),
+					requestBuilder.newProjectAction(user, projectName, endpointDTO), String.class);
 			requestId.put(user.getName(), uuid);
 		} catch (Exception e) {
 			log.error("Can not terminate project due to: {}", e.getMessage());
@@ -241,21 +261,16 @@
 		}
 	}
 
-	private void checkProjectRelatedResourcesInProgress(String projectName, String action) {
-		List<UserInstanceDTO> userInstanceDTOs = exploratoryDAO.fetchProjectExploratoriesWhereStatusIn(projectName,
-				Arrays.asList(UserInstanceStatus.CREATING, UserInstanceStatus.STARTING,
-						UserInstanceStatus.CREATING_IMAGE), UserInstanceStatus.CREATING,
-				UserInstanceStatus.CONFIGURING, UserInstanceStatus.STARTING, UserInstanceStatus.RECONFIGURING,
-				UserInstanceStatus.CREATING_IMAGE);
-		if (!userInstanceDTOs.isEmpty()) {
-			throw new ResourceConflictException((String.format("Can not %s environment because on of user resource " +
-					"is in status CREATING or STARTING", action)));
-		}
-	}
+	private void checkProjectRelatedResourcesInProgress(String projectName, List<ProjectEndpointDTO> endpoints, String action) {
+		boolean edgeProgress = endpoints.stream().anyMatch(e ->
+				Arrays.asList(UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.STOPPING,
+						UserInstanceStatus.TERMINATING).contains(e.getStatus()));
 
-	private void stopNotebook(UserInstanceDTO instance) {
-		final UserInfo userInfo = securityService.getUserInfoOffline(instance.getUser());
-		exploratoryService.stop(userInfo, instance.getExploratoryName());
+		List<String> endpointsName = endpoints.stream().map(ProjectEndpointDTO::getName).collect(Collectors.toList());
+		if (edgeProgress || !checkExploratoriesAndComputationalProgress(projectName, endpointsName)) {
+			throw new ResourceConflictException(String.format("Can not %s environment because one of the project " +
+					"resources is in a processing stage", action));
+		}
 	}
 
 	private Supplier<ResourceNotFoundException> projectNotFound() {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
index e783d9c..e75d9df 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
@@ -19,21 +19,15 @@
 
 package com.epam.dlab.backendapi.service.impl;
 
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.ExploratoryService;
 import com.epam.dlab.backendapi.service.ReuploadKeyService;
-import com.epam.dlab.backendapi.service.UserResourceService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
 import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.dto.reuploadkey.ReuploadKeyDTO;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatus;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatusDTO;
-import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.model.ResourceType;
 import com.epam.dlab.rest.client.RESTService;
@@ -41,25 +35,15 @@
 import com.google.inject.Singleton;
 import com.google.inject.name.Named;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
 
 import static com.epam.dlab.constants.ServiceConsts.PROVISIONING_SERVICE_NAME;
-import static com.epam.dlab.dto.UserInstanceStatus.REUPLOADING_KEY;
 import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-import static com.epam.dlab.rest.contracts.KeyAPI.REUPLOAD_KEY;
 
 @Singleton
 @Slf4j
 public class ReuploadKeyServiceImpl implements ReuploadKeyService {
 
 	@Inject
-	private KeyDAO keyDAO;
-	@Inject
 	@Named(PROVISIONING_SERVICE_NAME)
 	private RESTService provisioningService;
 	@Inject
@@ -72,91 +56,41 @@
 	private ComputationalDAO computationalDAO;
 	@Inject
 	private ExploratoryDAO exploratoryDAO;
-	@Inject
-	private UserResourceService userResourceService;
 
 	private static final String REUPLOAD_KEY_UPDATE_MSG = "Reuploading key process is successfully finished. " +
 			"Updating 'reupload_key_required' flag to 'false' for {}.";
 	private static final String REUPLOAD_KEY_ERROR_MSG = "Reuploading key process is failed for {}. The next attempt " +
 			"starts after resource restarting.";
 
-
-	@Override
-	public String reuploadKey(UserInfo user, String keyContent) {
-		userResourceService.updateReuploadKeyFlagForUserResources(user.getName(), true);
-		List<ResourceData> resourcesForKeyReuploading = userResourceService.convertToResourceData(
-				exploratoryService.getInstancesWithStatuses(user.getName(), RUNNING, RUNNING));
-		keyDAO.getEdgeInfoWhereStatusIn(user.getName(), RUNNING)
-				.ifPresent(edgeInfo -> {
-					resourcesForKeyReuploading.add(ResourceData.edgeResource(edgeInfo.getInstanceId()));
-					keyDAO.updateEdgeStatus(user.getName(), REUPLOADING_KEY.toString());
-				});
-		updateStatusForUserInstances(user.getName(), REUPLOADING_KEY);
-
-		ReuploadKeyDTO reuploadKeyDTO = requestBuilder.newKeyReupload(user, UUID.randomUUID().toString(), keyContent,
-				resourcesForKeyReuploading);
-		return provisioningService.post(REUPLOAD_KEY, user.getAccessToken(), reuploadKeyDTO, String.class);
-	}
-
 	@Override
 	public void updateResourceData(ReuploadKeyStatusDTO dto) {
-		String user = dto.getUser();
-		ResourceData resource = dto.getReuploadKeyCallbackDTO().getResource();
-		log.debug("Updating resource {} to status RUNNING...", resource.toString());
-		updateResourceStatus(user, resource, RUNNING);
-		if (dto.getReuploadKeyStatus() == ReuploadKeyStatus.COMPLETED) {
-			log.debug(REUPLOAD_KEY_UPDATE_MSG, resource.toString());
-			updateResourceReuploadKeyFlag(user, resource, false);
-		} else {
-			log.error(REUPLOAD_KEY_ERROR_MSG, resource.toString());
-		}
-	}
+		String user = dto.getUser();
+		ResourceData resource = dto.getReuploadKeyCallbackDTO().getResource();
+		log.debug("Updating resource {} to status RUNNING...", resource.toString());
+		updateResourceStatus(user, null, resource, RUNNING);
+		if (dto.getReuploadKeyStatus() == ReuploadKeyStatus.COMPLETED) {
+			log.debug(REUPLOAD_KEY_UPDATE_MSG, resource.toString());
+			updateResourceReuploadKeyFlag(user, null, resource, false);
+		} else {
+			log.error(REUPLOAD_KEY_ERROR_MSG, resource.toString());
+		}
+	}
 
-	@Override
-	public void reuploadKeyAction(UserInfo userInfo, ResourceData resourceData) {
-		try {
-			updateResourceStatus(userInfo.getName(), resourceData, REUPLOADING_KEY);
-			ReuploadKeyDTO reuploadKeyDTO = requestBuilder.newKeyReupload(userInfo, UUID.randomUUID().toString(),
-					StringUtils.EMPTY, Collections.singletonList(resourceData));
-			String uuid = provisioningService.post(REUPLOAD_KEY, userInfo.getAccessToken(), reuploadKeyDTO,
-					String.class, Collections.singletonMap("is_primary_reuploading", false));
-			requestId.put(userInfo.getName(), uuid);
-		} catch (Exception t) {
-			log.error("Couldn't reupload key to " + resourceData.toString() + " for user {}", userInfo.getName(), t);
-			updateResourceStatus(userInfo.getName(), resourceData, RUNNING);
-			throw new DlabException("Couldn't reupload key to " + resourceData.toString() + " for user " +
-					userInfo.getName() + ":	" + t.getLocalizedMessage(), t);
-		}
-	}
+	private void updateResourceStatus(String user, String project, ResourceData resourceData, UserInstanceStatus newStatus) {
+		if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
+			exploratoryDAO.updateStatusForExploratory(user, project, resourceData.getExploratoryName(), newStatus);
+		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
+			computationalDAO.updateStatusForComputationalResource(user, project,
+					resourceData.getExploratoryName(), resourceData.getComputationalName(), newStatus);
+		}
+	}
 
-	private void updateResourceStatus(String user, ResourceData resourceData, UserInstanceStatus newStatus) {
-		if (resourceData.getResourceType() == ResourceType.EDGE) {
-			keyDAO.updateEdgeStatus(user, newStatus.toString());
-		} else if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
-			exploratoryDAO.updateStatusForExploratory(user, resourceData.getExploratoryName(), newStatus);
-		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
-			computationalDAO.updateStatusForComputationalResource(user, resourceData.getExploratoryName(),
-					resourceData.getComputationalName(), newStatus);
-		}
-	}
-
-	private void updateResourceReuploadKeyFlag(String user, ResourceData resourceData, boolean reuploadKeyRequired) {
-		if (resourceData.getResourceType() == ResourceType.EDGE) {
-			keyDAO.updateEdgeReuploadKey(user, reuploadKeyRequired, UserInstanceStatus.values());
-		} else if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
-			exploratoryDAO.updateReuploadKeyForExploratory(user, resourceData.getExploratoryName(),
-					reuploadKeyRequired);
-		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
-			computationalDAO.updateReuploadKeyFlagForComputationalResource(user, resourceData.getExploratoryName(),
-					resourceData.getComputationalName(), reuploadKeyRequired);
-		}
-	}
-
-	private void updateStatusForUserInstances(String user, UserInstanceStatus newStatus) {
-		exploratoryDAO.updateStatusForExploratories(newStatus, user, RUNNING);
-		computationalDAO.updateStatusForComputationalResources(newStatus, user,
-				Arrays.asList(RUNNING, REUPLOADING_KEY), Arrays.asList(DataEngineType.SPARK_STANDALONE,
-						DataEngineType.CLOUD_SERVICE), RUNNING);
-	}
-
+	private void updateResourceReuploadKeyFlag(String user, String project, ResourceData resourceData, boolean reuploadKeyRequired) {
+		if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
+			exploratoryDAO.updateReuploadKeyForExploratory(user, project, resourceData.getExploratoryName(), reuploadKeyRequired);
+		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
+			computationalDAO.updateReuploadKeyFlagForComputationalResource(user, project,
+					resourceData.getExploratoryName(), resourceData.getComputationalName(), reuploadKeyRequired);
+		}
+	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
index e84908e..cb7b1c1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
@@ -44,7 +44,12 @@
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.StringUtils;
 
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
 import java.time.temporal.ChronoUnit;
 import java.util.Date;
 import java.util.List;
@@ -55,7 +60,13 @@
 import java.util.stream.Stream;
 
 import static com.epam.dlab.constants.ServiceConsts.PROVISIONING_SERVICE_NAME;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
 import static com.epam.dlab.dto.base.DataEngineType.getDockerImageName;
 import static java.time.ZoneId.systemDefault;
 import static java.util.Collections.singletonList;
@@ -98,44 +109,44 @@
 	private RESTService provisioningService;
 
 	@Override
-	public SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String exploratoryName) {
-		return schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(user, exploratoryName)
+	public SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String project, String exploratoryName) {
+		return schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(user, project, exploratoryName)
 				.orElseThrow(() -> new ResourceNotFoundException(String.format(SCHEDULER_NOT_FOUND_MSG, user,
 						exploratoryName)));
 	}
 
 	@Override
-	public SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String exploratoryName,
+	public SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String project, String exploratoryName,
 																	 String computationalName) {
-		return schedulerJobDAO.fetchSingleSchedulerJobForCluster(user, exploratoryName, computationalName)
+		return schedulerJobDAO.fetchSingleSchedulerJobForCluster(user, project, exploratoryName, computationalName)
 				.orElseThrow(() -> new ResourceNotFoundException(String.format(SCHEDULER_NOT_FOUND_MSG, user,
 						exploratoryName) + " with computational resource " + computationalName));
 	}
 
 	@Override
-	public void updateExploratorySchedulerData(String user, String exploratoryName, SchedulerJobDTO dto) {
-		validateExploratoryStatus(user, exploratoryName);
+	public void updateExploratorySchedulerData(String user, String project, String exploratoryName, SchedulerJobDTO dto) {
+		validateExploratoryStatus(user, project, exploratoryName);
 		populateDefaultSchedulerValues(dto);
 		log.debug("Updating exploratory {} for user {} with new scheduler job data: {}...", exploratoryName, user,
 				dto);
-		exploratoryDAO.updateSchedulerDataForUserAndExploratory(user, exploratoryName, dto);
+		exploratoryDAO.updateSchedulerDataForUserAndExploratory(user, project, exploratoryName, dto);
 
 		if (!dto.inactivityScheduler() && dto.isSyncStartRequired()) {
-			shareSchedulerJobDataToSparkClusters(user, exploratoryName, dto);
+			shareSchedulerJobDataToSparkClusters(user, project, exploratoryName, dto);
 		} else if (!dto.inactivityScheduler()) {
-			computationalDAO.updateSchedulerSyncFlag(user, exploratoryName, dto.isSyncStartRequired());
+			computationalDAO.updateSchedulerSyncFlag(user, project, exploratoryName, dto.isSyncStartRequired());
 		}
 	}
 
 	@Override
-	public void updateComputationalSchedulerData(String user, String exploratoryName, String computationalName,
+	public void updateComputationalSchedulerData(String user, String project, String exploratoryName, String computationalName,
 												 SchedulerJobDTO dto) {
-		validateExploratoryStatus(user, exploratoryName);
-		validateComputationalStatus(user, exploratoryName, computationalName);
+		validateExploratoryStatus(user, project, exploratoryName);
+		validateComputationalStatus(user, project, exploratoryName, computationalName);
 		populateDefaultSchedulerValues(dto);
 		log.debug("Updating computational resource {} affiliated with exploratory {} for user {} with new scheduler " +
 				"job data {}...", computationalName, exploratoryName, user, dto);
-		computationalDAO.updateSchedulerDataForComputationalResource(user, exploratoryName, computationalName, dto);
+		computationalDAO.updateSchedulerDataForComputationalResource(user, project, exploratoryName, computationalName, dto);
 	}
 
 	@Override
@@ -203,11 +214,12 @@
 	}
 
 	private void stopComputational(SchedulerJobData job) {
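+		// Scheduler jobs are project-scoped now, so the project is passed through to the service layer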
+		final String project = job.getProject();
 		final String expName = job.getExploratoryName();
 		final String compName = job.getComputationalName();
 		final String user = job.getUser();
 		log.debug("Stopping exploratory {} computational {} for user {} by scheduler", expName, compName, user);
-		computationalService.stopSparkCluster(securityService.getServiceAccountInfo(user), expName, compName);
+		computationalService.stopSparkCluster(securityService.getServiceAccountInfo(user), project, expName, compName);
 	}
 
 	private void terminateComputational(SchedulerJobData job) {
@@ -216,14 +228,15 @@
 		final String compName = job.getComputationalName();
 		final UserInfo userInfo = securityService.getServiceAccountInfo(user);
 		log.debug("Terminating exploratory {} computational {} for user {} by scheduler", expName, compName, user);
-		computationalService.terminateComputational(userInfo, expName, compName);
+		computationalService.terminateComputational(userInfo, job.getProject(), expName, compName);
 	}
 
 	private void stopExploratory(SchedulerJobData job) {
 		final String expName = job.getExploratoryName();
 		final String user = job.getUser();
+		final String project = job.getProject();
 		log.debug("Stopping exploratory {} for user {} by scheduler", expName, user);
-		exploratoryService.stop(securityService.getServiceAccountInfo(user), expName);
+		exploratoryService.stop(securityService.getServiceAccountInfo(user), project, expName);
 	}
 
 	private List<SchedulerJobData> getExploratorySchedulersForTerminating(OffsetDateTime now) {
@@ -250,7 +263,7 @@
 			log.trace("Starting computational for exploratory {} for user {} by scheduler", exploratoryName, user);
 			final DataEngineType sparkCluster = DataEngineType.SPARK_STANDALONE;
 			final List<UserComputationalResource> compToBeStarted =
-					computationalDAO.findComputationalResourcesWithStatus(user, exploratoryName, STOPPED);
+					computationalDAO.findComputationalResourcesWithStatus(user, project, exploratoryName, STOPPED);
 
 			compToBeStarted
 					.stream()
@@ -261,9 +274,10 @@
 
 	private void terminateExploratory(SchedulerJobData job) {
 		final String user = job.getUser();
+		final String project = job.getProject();
 		final String expName = job.getExploratoryName();
 		log.debug("Terminating exploratory {} for user {} by scheduler", expName, user);
-		exploratoryService.terminate(securityService.getUserInfoOffline(user), expName);
+		exploratoryService.terminate(securityService.getUserInfoOffline(user), project, expName);
 	}
 
 	private void startSpark(String user, String expName, String compName, String project) {
@@ -282,19 +296,20 @@
 	 * performed automatically with notebook stopping since Spark resources have such feature).
 	 *
 	 * @param user            user's name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory resource
 	 * @param dto             scheduler job data.
 	 */
-	private void shareSchedulerJobDataToSparkClusters(String user, String exploratoryName, SchedulerJobDTO dto) {
-		List<String> correspondingSparkClusters = computationalDAO.getComputationalResourcesWhereStatusIn(user,
-				singletonList(DataEngineType.SPARK_STANDALONE), exploratoryName,
-				STARTING, RUNNING, STOPPING, STOPPED);
+	private void shareSchedulerJobDataToSparkClusters(String user, String project, String exploratoryName, SchedulerJobDTO dto) {
+		List<String> correspondingSparkClusters = computationalDAO.getComputationalResourcesWhereStatusIn(user, project,
+				singletonList(DataEngineType.SPARK_STANDALONE),
+				exploratoryName, STARTING, RUNNING, STOPPING, STOPPED);
 		SchedulerJobDTO dtoWithoutStopData = getSchedulerJobWithoutStopData(dto);
 		for (String sparkName : correspondingSparkClusters) {
 			log.debug("Updating computational resource {} affiliated with exploratory {} for user {} with new " +
 					"scheduler job data {}...", sparkName, exploratoryName, user, dtoWithoutStopData);
-			computationalDAO.updateSchedulerDataForComputationalResource(user, exploratoryName, sparkName,
-					dtoWithoutStopData);
+			computationalDAO.updateSchedulerDataForComputationalResource(user, project, exploratoryName,
+					sparkName, dtoWithoutStopData);
 		}
 	}
 
@@ -367,10 +382,11 @@
 	}
 
 	private boolean computationalInactivityExceed(SchedulerJobData schedulerJobData, SchedulerJobDTO schedulerData) {
+		final String projectName = schedulerJobData.getProject();
 		final String explName = schedulerJobData.getExploratoryName();
 		final String compName = schedulerJobData.getComputationalName();
 		final String user = schedulerJobData.getUser();
-		final UserComputationalResource c = computationalDAO.fetchComputationalFields(user, explName, compName);
+		final UserComputationalResource c = computationalDAO.fetchComputationalFields(user, projectName, explName, compName);
 		final Long maxInactivity = schedulerData.getMaxInactivity();
 		return inactivityCondition(maxInactivity, c.getStatus(), c.getLastActivity());
 	}
@@ -381,9 +397,10 @@
 	}
 
 	private boolean exploratoryInactivityExceed(SchedulerJobData schedulerJobData, SchedulerJobDTO schedulerData) {
+		final String project = schedulerJobData.getProject();
 		final String expName = schedulerJobData.getExploratoryName();
 		final String user = schedulerJobData.getUser();
-		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(user, expName, true);
+		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(user, project, expName, true);
 		final boolean canBeStopped = userInstanceDTO.getResources()
 				.stream()
 				.map(UserComputationalResource::getStatus)
@@ -409,14 +426,14 @@
 		}
 	}
 
-	private void validateExploratoryStatus(String user, String exploratoryName) {
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(user, exploratoryName);
+	private void validateExploratoryStatus(String user, String project, String exploratoryName) {
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(user, project, exploratoryName);
 		validateResourceStatus(userInstance.getStatus());
 	}
 
-	private void validateComputationalStatus(String user, String exploratoryName, String computationalName) {
+	private void validateComputationalStatus(String user, String project, String exploratoryName, String computationalName) {
 		final UserComputationalResource computationalResource =
-				computationalDAO.fetchComputationalFields(user, exploratoryName, computationalName);
+				computationalDAO.fetchComputationalFields(user, project, exploratoryName, computationalName);
 		final String computationalStatus = computationalResource.getStatus();
 		validateResourceStatus(computationalStatus);
 	}
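Taken together, the scheduler changes above thread the project name through every service and DAO call, so exploratories with the same name in different projects no longer collide. Illustrative call shapes (signatures are the ones introduced in this diff; values are placeholders):

    UserInfo userInfo = securityService.getUserInfoOffline("jdoe");
    String project = "analytics";      // the newly required scope
    String notebook = "jupyter-nb";

    // before: exploratoryService.terminate(userInfo, notebook);
    exploratoryService.terminate(userInfo, project, notebook);

    // the same (user, project, exploratory) triple now keys DAO lookups too:
    UserInstanceDTO ui = exploratoryDAO.fetchExploratoryFields("jdoe", project, notebook, true);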
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java
index 0a71587..9eb25c3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java
@@ -18,73 +18,69 @@
  */
 package com.epam.dlab.backendapi.service.impl;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.dao.UserGroupDao;
 import com.epam.dlab.backendapi.dao.UserRoleDao;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
+import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.UserGroupService;
 import com.epam.dlab.dto.UserInstanceStatus;
+import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceConflictException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 @Singleton
 @Slf4j
 public class UserGroupServiceImpl implements UserGroupService {
-
 	private static final String ROLE_NOT_FOUND_MSG = "None of the roles %s were found";
+	private static final String ADMIN = "admin";
+	private static final String PROJECT_ADMIN = "projectAdmin";
+
 	@Inject
 	private UserGroupDao userGroupDao;
 	@Inject
 	private UserRoleDao userRoleDao;
 	@Inject
 	private ProjectDAO projectDAO;
+	@Inject
+	private ProjectService projectService;
 
 	@Override
 	public void createGroup(String group, Set<String> roleIds, Set<String> users) {
 		checkAnyRoleFound(roleIds, userRoleDao.addGroupToRole(Collections.singleton(group), roleIds));
-		if (!users.isEmpty()) {
-			log.debug("Adding users {} to group {}", users, group);
-			userGroupDao.addUsers(group, users);
-		}
-	}
-
-	@Override
-	public void updateGroup(String group, Set<String> roleIds, Set<String> users) {
-		log.debug("Updating users for group {}: {}", group, users);
-		userGroupDao.updateUsers(group, users);
-		log.debug("Removing group {} from existing roles", group);
-		userRoleDao.removeGroupWhenRoleNotIn(group, roleIds);
-		log.debug("Adding group {} to roles {}", group, roleIds);
-		userRoleDao.addGroupToRole(Collections.singleton(group), roleIds);
-	}
-
-	@Override
-	public void addUsersToGroup(String group, Set<String> users) {
+		log.debug("Adding users {} to group {}", users, group);
 		userGroupDao.addUsers(group, users);
 	}
 
 	@Override
-	public void updateRolesForGroup(String group, Set<String> roleIds) {
-		userRoleDao.removeGroupWhenRoleNotIn(group, roleIds);
-		checkAnyRoleFound(roleIds, userRoleDao.addGroupToRole(Collections.singleton(group), roleIds));
-	}
-
-	@Override
-	public void removeUserFromGroup(String group, String user) {
-		userGroupDao.removeUser(group, user);
-	}
-
-	@Override
-	public void removeGroupFromRole(Set<String> groups, Set<String> roleIds) {
-		checkAnyRoleFound(roleIds, userRoleDao.removeGroupFromRole(groups, roleIds));
+	public void updateGroup(UserInfo user, String group, Set<String> roleIds, Set<String> users) {
+		if (UserRoles.isAdmin(user)) {
+			updateGroup(group, roleIds, users);
+		} else if (UserRoles.isProjectAdmin(user)) {
+			projectService.getProjects(user)
+					.stream()
+					.map(ProjectDTO::getGroups)
+					.flatMap(Collection::stream)
+					.filter(g -> g.equalsIgnoreCase(group))
+					.findAny()
+					.orElseThrow(() -> new DlabException(String.format("User %s doesn't have appropriate permission", user.getName())));
+			updateGroup(group, roleIds, users);
+		} else {
+			throw new DlabException(String.format("User %s doesn't have appropriate permission", user.getName()));
+		}
 	}
 
 	@Override
@@ -102,8 +98,39 @@
 	}
 
 	@Override
-	public List<UserGroupDto> getAggregatedRolesByGroup() {
-		return userRoleDao.aggregateRolesByGroup();
+	public List<UserGroupDto> getAggregatedRolesByGroup(UserInfo user) {
+		if (UserRoles.isAdmin(user)) {
+			return userRoleDao.aggregateRolesByGroup();
+		} else if (UserRoles.isProjectAdmin(user)) {
+			Set<String> groups = projectService.getProjects(user)
+					.stream()
+					.map(ProjectDTO::getGroups)
+					.flatMap(Collection::stream)
+					.collect(Collectors.toSet());
+			return userRoleDao.aggregateRolesByGroup()
+					.stream()
+					.filter(userGroup -> groups.contains(userGroup.getGroup()) && !containsAdministrationPermissions(userGroup))
+					.collect(Collectors.toList());
+		} else {
+			throw new DlabException(String.format("User %s doesn't have appropriate permission", user.getName()));
+		}
+	}
+
+	private boolean containsAdministrationPermissions(UserGroupDto userGroup) {
+		List<String> ids = userGroup.getRoles()
+				.stream()
+				.map(UserRoleDto::getId)
+				.collect(Collectors.toList());
+		return ids.contains(ADMIN) || ids.contains(PROJECT_ADMIN);
+	}
+
+	private void updateGroup(String group, Set<String> roleIds, Set<String> users) {
+		log.debug("Updating users for group {}: {}", group, users);
+		userGroupDao.updateUsers(group, users);
+		log.debug("Removing group {} from existing roles", group);
+		userRoleDao.removeGroupWhenRoleNotIn(group, roleIds);
+		log.debug("Adding group {} to roles {}", group, roleIds);
+		userRoleDao.addGroupToRole(Collections.singleton(group), roleIds);
 	}
 
 	private void checkAnyRoleFound(Set<String> roleIds, boolean anyRoleFound) {
@@ -111,6 +138,4 @@
 			throw new ResourceNotFoundException(String.format(ROLE_NOT_FOUND_MSG, roleIds));
 		}
 	}
-
-
 }
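The admin/projectAdmin gate above appears twice (updateGroup and getAggregatedRolesByGroup). A sketch of the shared check pulled into a helper; the helper itself is hypothetical, while UserRoles, projectService and DlabException come from this diff:

    private void checkProjectAdminOwnsGroup(UserInfo user, String group) {
        boolean owns = projectService.getProjects(user)
                .stream()
                .map(ProjectDTO::getGroups)
                .flatMap(Collection::stream)
                .anyMatch(g -> g.equalsIgnoreCase(group));
        if (!owns) {
            throw new DlabException(String.format("User %s doesn't have appropriate permission", user.getName()));
        }
    }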
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserResourceServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserResourceServiceImpl.java
deleted file mode 100644
index 7a1c27c..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserResourceServiceImpl.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-
-package com.epam.dlab.backendapi.service.impl;
-
-import com.epam.dlab.backendapi.service.ComputationalService;
-import com.epam.dlab.backendapi.service.EdgeService;
-import com.epam.dlab.backendapi.service.ExploratoryService;
-import com.epam.dlab.backendapi.service.UserResourceService;
-import com.epam.dlab.dto.UserInstanceDTO;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.model.ResourceData;
-import com.google.inject.Inject;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static com.epam.dlab.dto.UserInstanceStatus.*;
-
-public class UserResourceServiceImpl implements UserResourceService {
-
-	@Inject
-	private ExploratoryService exploratoryService;
-	@Inject
-	private ComputationalService computationalService;
-	@Inject
-	private EdgeService edgeService;
-
-	/**
-	 * Converts user's instances to another data type.
-	 *
-	 * @param userInstances list of user instances.
-	 * @return converted list of resources' data.
-	 */
-	@Override
-	public List<ResourceData> convertToResourceData(List<UserInstanceDTO> userInstances) {
-		return userInstances
-				.stream()
-				.flatMap(this::resourceDataStream)
-				.collect(Collectors.toList());
-	}
-
-	/**
-	 * Updates flag 'reuploadKeyRequired' for user's resources with predefined statuses.
-	 *
-	 * @param user                user's name.
-	 * @param reuploadKeyRequired true/false.
-	 */
-	@Override
-	public void updateReuploadKeyFlagForUserResources(String user, boolean reuploadKeyRequired) {
-		exploratoryService.updateExploratoriesReuploadKeyFlag(user, reuploadKeyRequired,
-				CREATING, CONFIGURING, STARTING, RUNNING, STOPPING, STOPPED);
-		computationalService.updateComputationalsReuploadKeyFlag(user,
-				Arrays.asList(STARTING, RUNNING, STOPPING, STOPPED),
-				Collections.singletonList(DataEngineType.SPARK_STANDALONE),
-				reuploadKeyRequired,
-				CREATING, CONFIGURING, STARTING, RUNNING, STOPPING, STOPPED);
-		computationalService.updateComputationalsReuploadKeyFlag(user,
-				Collections.singletonList(RUNNING),
-				Collections.singletonList(DataEngineType.CLOUD_SERVICE),
-				reuploadKeyRequired,
-				CREATING, CONFIGURING, STARTING, RUNNING);
-		edgeService.updateReuploadKeyFlag(user, reuploadKeyRequired, STARTING, RUNNING, STOPPING, STOPPED);
-	}
-
-	private Stream<ResourceData> resourceDataStream(UserInstanceDTO ui) {
-		final Stream<ResourceData> exploratoryStream =
-				Stream.of(ResourceData.exploratoryResource(ui.getExploratoryId(), ui.getExploratoryName()));
-		final Stream<ResourceData> computationalStream = ui.getResources()
-				.stream()
-				.map(cr -> ResourceData.computationalResource(cr.getComputationalId(),
-						ui.getExploratoryName(), cr.getComputationalName()));
-		return Stream.concat(exploratoryStream, computationalStream);
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/BillingUtils.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/BillingUtils.java
new file mode 100644
index 0000000..7e46a09
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/BillingUtils.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.util;
+
+import com.epam.dlab.backendapi.domain.BillingReportLine;
+import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
+import com.epam.dlab.dto.UserInstanceDTO;
+import com.epam.dlab.dto.UserInstanceStatus;
+import com.epam.dlab.dto.base.DataEngineType;
+import com.epam.dlab.dto.computational.UserComputationalResource;
+import jersey.repackaged.com.google.common.collect.Lists;
+import org.apache.commons.lang3.StringUtils;
+
+import java.time.LocalDate;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Stream;
+
+import static com.epam.dlab.dto.billing.BillingResourceType.BUCKET;
+import static com.epam.dlab.dto.billing.BillingResourceType.COMPUTATIONAL;
+import static com.epam.dlab.dto.billing.BillingResourceType.EDGE;
+import static com.epam.dlab.dto.billing.BillingResourceType.ENDPOINT;
+import static com.epam.dlab.dto.billing.BillingResourceType.EXPLORATORY;
+import static com.epam.dlab.dto.billing.BillingResourceType.IMAGE;
+import static com.epam.dlab.dto.billing.BillingResourceType.SSN;
+import static com.epam.dlab.dto.billing.BillingResourceType.VOLUME;
+
+public class BillingUtils {
+    private static final String[] AVAILABLE_NOTEBOOKS = {"zeppelin", "tensor-rstudio", "rstudio", "tensor", "superset", "jupyterlab", "jupyter", "deeplearning"};
+    private static final String[] REPORT_HEADERS = {"DLab ID", "User", "Project", "DLab Resource Type", "Status", "Shape", "Product", "Cost"};
+    private static final String REPORT_FIRST_LINE = "Service base name: %s. Available reporting period from: %s to: %s";
+    private static final String TOTAL_LINE = "Total: %s %s";
+    private static final String SSN_FORMAT = "%s-ssn";
+    private static final String ENDPOINT_FORMAT = "%s-%s-endpoint";
+    private static final String EDGE_FORMAT = "%s-%s-%s-edge";
+    private static final String EDGE_VOLUME_FORMAT = "%s-%s-%s-edge-volume-primary";
+    private static final String PROJECT_ENDPOINT_BUCKET_FORMAT = "%s-%s-%s-bucket";
+    private static final String ENDPOINT_SHARED_BUCKET_FORMAT = "%s-%s-shared-bucket";
+    private static final String VOLUME_PRIMARY_FORMAT = "%s-volume-primary";
+    private static final String VOLUME_PRIMARY_COMPUTATIONAL_FORMAT = "%s-%s-volume-primary";
+    private static final String VOLUME_SECONDARY_FORMAT = "%s-volume-secondary";
+    private static final String VOLUME_SECONDARY_COMPUTATIONAL_FORMAT = "%s-%s-volume-secondary";
+    private static final String IMAGE_STANDARD_FORMAT1 = "%s-%s-%s-%s-notebook-image";
+    private static final String IMAGE_STANDARD_FORMAT2 = "%s-%s-%s-notebook-image";
+    private static final String IMAGE_CUSTOM_FORMAT = "%s-%s-%s-%s-%s";
+
+    private static final String SHARED_RESOURCE = "Shared resource";
+    private static final String IMAGE_NAME = "Image";
+
+    private static final String DATAENGINE_NAME_FORMAT = "%d x %s";
+    private static final String DATAENGINE_SERVICE_NAME_FORMAT = "Master: %s Slave: %s";
+
+    public static Stream<BillingReportLine> edgeBillingDataStream(String project, String sbn, String endpoint) {
+        final String userEdgeId = String.format(EDGE_FORMAT, sbn, project, endpoint).toLowerCase();
+        final String edgeVolumeId = String.format(EDGE_VOLUME_FORMAT, sbn, project, endpoint).toLowerCase();
+        final String endpointBucketId = String.format(PROJECT_ENDPOINT_BUCKET_FORMAT, sbn, project, endpoint).toLowerCase();
+
+        return Stream.concat(Stream.of(
+                BillingReportLine.builder().resourceName(endpoint).user(SHARED_RESOURCE).project(project).dlabId(userEdgeId).resourceType(EDGE).build(),
+                BillingReportLine.builder().resourceName("EDGE volume").user(SHARED_RESOURCE).project(project).dlabId(edgeVolumeId).resourceType(VOLUME).build(),
+                BillingReportLine.builder().resourceName("Project endpoint shared bucket").user(SHARED_RESOURCE).project(project).dlabId(endpointBucketId).resourceType(BUCKET).build()
+                ),
+                standardImageBillingDataStream(sbn, project, endpoint)
+        );
+    }
+
+    public static Stream<BillingReportLine> ssnBillingDataStream(String sbn) {
+        final String ssnId = String.format(SSN_FORMAT, sbn);
+        return Stream.of(
+                BillingReportLine.builder().user(SHARED_RESOURCE).project(SHARED_RESOURCE).resourceName("SSN").dlabId(ssnId).resourceType(SSN).build(),
+                BillingReportLine.builder().user(SHARED_RESOURCE).project(SHARED_RESOURCE).resourceName("SSN Volume").dlabId(String.format(VOLUME_PRIMARY_FORMAT, ssnId)).resourceType(VOLUME).build()
+        );
+    }
+
+    public static Stream<BillingReportLine> sharedEndpointBillingDataStream(String endpoint, String sbn) {
+        final String projectEndpointBucketId = String.format(ENDPOINT_SHARED_BUCKET_FORMAT, sbn, endpoint).toLowerCase();
+        final String endpointId = String.format(ENDPOINT_FORMAT, sbn, endpoint).toLowerCase();
+        return Stream.concat(Stream.of(
+                BillingReportLine.builder().resourceName("Endpoint shared bucket").user(SHARED_RESOURCE).project(SHARED_RESOURCE).dlabId(projectEndpointBucketId).resourceType(BUCKET).build(),
+                BillingReportLine.builder().resourceName("Endpoint").user(SHARED_RESOURCE).project(SHARED_RESOURCE).dlabId(endpointId).resourceType(ENDPOINT).build()
+                ),
+                standardImageBillingDataStream(sbn, endpoint));
+    }
+
+    public static Stream<BillingReportLine> exploratoryBillingDataStream(UserInstanceDTO userInstance, Integer maxSparkInstanceCount) {
+        final Stream<BillingReportLine> computationalStream = userInstance.getResources()
+                .stream()
+                .filter(cr -> cr.getComputationalId() != null)
+                .flatMap(cr -> {
+                    final String computationalId = cr.getComputationalId().toLowerCase();
+                    return Stream.concat(Stream.of(
+                            withUserProjectEndpoint(userInstance).resourceName(cr.getComputationalName()).dlabId(computationalId).resourceType(COMPUTATIONAL).shape(getComputationalShape(cr))
+                                    .exploratoryName(userInstance.getExploratoryName()).build(),
+                            withUserProjectEndpoint(userInstance).resourceName(cr.getComputationalName()).dlabId(String.format(VOLUME_PRIMARY_FORMAT, computationalId)).resourceType(VOLUME).build(),
+                            withUserProjectEndpoint(userInstance).resourceName(cr.getComputationalName()).dlabId(String.format(VOLUME_SECONDARY_FORMAT, computationalId)).resourceType(VOLUME).build(),
+                            withUserProjectEndpoint(userInstance).resourceName(cr.getComputationalName()).dlabId(String.format(VOLUME_PRIMARY_COMPUTATIONAL_FORMAT, computationalId, "m"))
+                                    .resourceType(VOLUME).build(),
+                            withUserProjectEndpoint(userInstance).resourceName(cr.getComputationalName()).dlabId(String.format(VOLUME_SECONDARY_COMPUTATIONAL_FORMAT, computationalId, "m"))
+                                    .resourceType(VOLUME).build()
+                            ),
+                            getSlaveVolumes(userInstance, cr, maxSparkInstanceCount)
+                    );
+                });
+        final String exploratoryName = userInstance.getExploratoryName();
+        final String exploratoryId = userInstance.getExploratoryId().toLowerCase();
+        final String primaryVolumeId = String.format(VOLUME_PRIMARY_FORMAT, exploratoryId);
+        final String secondaryVolumeId = String.format(VOLUME_SECONDARY_FORMAT, exploratoryId);
+        final Stream<BillingReportLine> exploratoryStream = Stream.of(
+                withUserProjectEndpoint(userInstance).resourceName(exploratoryName).dlabId(exploratoryId).resourceType(EXPLORATORY).shape(userInstance.getShape()).build(),
+                withUserProjectEndpoint(userInstance).resourceName(exploratoryName).dlabId(primaryVolumeId).resourceType(VOLUME).build(),
+                withUserProjectEndpoint(userInstance).resourceName(exploratoryName).dlabId(secondaryVolumeId).resourceType(VOLUME).build());
+
+        return Stream.concat(computationalStream, exploratoryStream);
+    }
+
+    public static Stream<BillingReportLine> customImageBillingDataStream(ImageInfoRecord image, String sbn) {
+        String imageId = String.format(IMAGE_CUSTOM_FORMAT, sbn, image.getProject(), image.getEndpoint(), image.getApplication(), image.getName()).toLowerCase();
+        return Stream.of(
+                BillingReportLine.builder().resourceName(image.getName()).project(image.getProject()).dlabId(imageId).user(image.getUser()).resourceType(IMAGE).build()
+        );
+    }
+
+    private static Stream<BillingReportLine> getSlaveVolumes(UserInstanceDTO userInstance, UserComputationalResource cr, Integer maxSparkInstanceCount) {
+        List<BillingReportLine> list = new ArrayList<>();
+        for (int i = 1; i <= maxSparkInstanceCount; i++) {
+            list.add(withUserProjectEndpoint(userInstance).resourceName(cr.getComputationalName()).dlabId(String.format(VOLUME_PRIMARY_COMPUTATIONAL_FORMAT, cr.getComputationalId().toLowerCase(), "s" + i))
+                    .resourceType(VOLUME).build());
+            list.add(withUserProjectEndpoint(userInstance).resourceName(cr.getComputationalName()).dlabId(String.format(VOLUME_SECONDARY_COMPUTATIONAL_FORMAT, cr.getComputationalId().toLowerCase(), "s" + i))
+                    .resourceType(VOLUME).build());
+        }
+        return list.stream();
+    }
+
+    private static BillingReportLine.BillingReportLineBuilder withUserProjectEndpoint(UserInstanceDTO userInstance) {
+        return BillingReportLine.builder().user(userInstance.getUser()).project(userInstance.getProject()).endpoint(userInstance.getEndpoint());
+    }
+
+    public static String getComputationalShape(UserComputationalResource resource) {
+        return DataEngineType.fromDockerImageName(resource.getImageName()) == DataEngineType.SPARK_STANDALONE ?
+                String.format(DATAENGINE_NAME_FORMAT, resource.getDataengineInstanceCount(), resource.getDataengineShape()) :
+                String.format(DATAENGINE_SERVICE_NAME_FORMAT, resource.getMasterNodeShape(), resource.getSlaveNodeShape());
+    }
+
+    private static Stream<BillingReportLine> standardImageBillingDataStream(String sbn, String endpoint) {
+        List<BillingReportLine> list = new ArrayList<>();
+        for (String notebook : AVAILABLE_NOTEBOOKS) {
+            list.add(BillingReportLine.builder().resourceName(IMAGE_NAME).dlabId(String.format(IMAGE_STANDARD_FORMAT2, sbn, endpoint, notebook).toLowerCase())
+                    .user(SHARED_RESOURCE).project(SHARED_RESOURCE).resourceType(IMAGE).build());
+        }
+
+        return list.stream();
+    }
+
+    private static Stream<BillingReportLine> standardImageBillingDataStream(String sbn, String project, String endpoint) {
+        List<BillingReportLine> list = new ArrayList<>();
+        for (String notebook : AVAILABLE_NOTEBOOKS) {
+            list.add(BillingReportLine.builder().resourceName(IMAGE_NAME).dlabId(String.format(IMAGE_STANDARD_FORMAT1, sbn, project, endpoint, notebook).toLowerCase())
+                    .project(project).user(SHARED_RESOURCE).resourceType(IMAGE).build());
+        }
+
+        return list.stream();
+    }
+
+    public static String getFirstLine(String sbn, LocalDate from, LocalDate to) {
+        return CSVFormatter.formatLine(Lists.newArrayList(String.format(REPORT_FIRST_LINE, sbn,
+                Optional.ofNullable(from).map(date -> date.format(DateTimeFormatter.ISO_DATE)).orElse(StringUtils.EMPTY),
+                Optional.ofNullable(to).map(date -> date.format(DateTimeFormatter.ISO_DATE)).orElse(StringUtils.EMPTY))),
+                CSVFormatter.SEPARATOR, '\"');
+    }
+
+    public static String getHeader(boolean isFull) {
+        List<String> headers = new ArrayList<>(Arrays.asList(BillingUtils.REPORT_HEADERS));
+        if (!isFull) {
+            headers.remove(1);
+        }
+        return CSVFormatter.formatLine(headers, CSVFormatter.SEPARATOR);
+    }
+
+    public static String printLine(BillingReportLine line, boolean isFull) {
+        List<String> lines = new ArrayList<>();
+        lines.add(getOrEmpty(line.getDlabId()));
+        if (isFull) {
+            lines.add(getOrEmpty(line.getUser()));
+        }
+        lines.add(getOrEmpty(line.getProject()));
+        lines.add(getOrEmpty(Optional.ofNullable(line.getResourceType()).map(r -> StringUtils.capitalize(r.toString().toLowerCase())).orElse(null)));
+        lines.add(getOrEmpty(Optional.ofNullable(line.getStatus()).map(UserInstanceStatus::toString).orElse(null)));
+        lines.add(getOrEmpty(line.getShape()));
+        lines.add(getOrEmpty(line.getProduct()));
+        lines.add(getOrEmpty(Optional.ofNullable(line.getCost()).map(String::valueOf).orElse(null)));
+        return CSVFormatter.formatLine(lines, CSVFormatter.SEPARATOR);
+    }
+
+    public static String getTotal(Double total, String currency) {
+        List<String> totalLine = new ArrayList<>();
+        for (int i = 0; i < REPORT_HEADERS.length - 1; i++) {
+            totalLine.add(StringUtils.EMPTY);
+        }
+        totalLine.add(REPORT_HEADERS.length - 1, String.format(TOTAL_LINE, getOrEmpty(String.valueOf(total)), getOrEmpty(currency)));
+        return CSVFormatter.formatLine(totalLine, CSVFormatter.SEPARATOR);
+    }
+
+    private static String getOrEmpty(String s) {
+        return Objects.nonNull(s) ? s : StringUtils.EMPTY;
+    }
+}
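Usage sketch for the helpers above, assembling a complete CSV report (sbn and billingLines are placeholders; that CSVFormatter.formatLine terminates each line is assumed from its use in this file):

    StringBuilder report = new StringBuilder();
    report.append(BillingUtils.getFirstLine(sbn, LocalDate.of(2020, 1, 1), LocalDate.of(2020, 1, 31)));
    report.append(BillingUtils.getHeader(true));           // isFull=true keeps the "User" column
    billingLines.forEach(l -> report.append(BillingUtils.printLine(l, true)));
    report.append(BillingUtils.getTotal(42.0, "USD"));     // placeholder total and currency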
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
index ac858e0..afe06cd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
@@ -22,6 +22,7 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.dao.SettingsDAO;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.domain.ExploratoryLibCache;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.BackupFormDTO;
@@ -30,41 +31,48 @@
 import com.epam.dlab.backendapi.resources.dto.aws.AwsComputationalCreateForm;
 import com.epam.dlab.backendapi.resources.dto.gcp.GcpComputationalCreateForm;
 import com.epam.dlab.cloud.CloudProvider;
-import com.epam.dlab.dto.*;
+import com.epam.dlab.dto.LibListComputationalDTO;
+import com.epam.dlab.dto.ResourceBaseDTO;
+import com.epam.dlab.dto.ResourceSysBaseDTO;
+import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.aws.AwsCloudSettings;
 import com.epam.dlab.dto.aws.computational.AwsComputationalTerminateDTO;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.dto.aws.computational.ComputationalCreateAws;
 import com.epam.dlab.dto.aws.computational.SparkComputationalCreateAws;
-import com.epam.dlab.dto.aws.edge.EdgeCreateAws;
 import com.epam.dlab.dto.aws.exploratory.ExploratoryCreateAws;
-import com.epam.dlab.dto.aws.keyload.UploadFileAws;
 import com.epam.dlab.dto.azure.AzureCloudSettings;
 import com.epam.dlab.dto.azure.computational.SparkComputationalCreateAzure;
-import com.epam.dlab.dto.azure.edge.EdgeCreateAzure;
 import com.epam.dlab.dto.azure.exploratory.ExploratoryActionStartAzure;
 import com.epam.dlab.dto.azure.exploratory.ExploratoryActionStopAzure;
 import com.epam.dlab.dto.azure.exploratory.ExploratoryCreateAzure;
-import com.epam.dlab.dto.azure.keyload.UploadFileAzure;
 import com.epam.dlab.dto.backup.EnvBackupDTO;
 import com.epam.dlab.dto.base.CloudSettings;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
-import com.epam.dlab.dto.base.keyload.UploadFile;
-import com.epam.dlab.dto.computational.*;
-import com.epam.dlab.dto.exploratory.*;
+import com.epam.dlab.dto.computational.ComputationalCheckInactivityDTO;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStartDTO;
+import com.epam.dlab.dto.computational.ComputationalStopDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.UserComputationalResource;
+import com.epam.dlab.dto.exploratory.ExploratoryActionDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryCheckInactivityAction;
+import com.epam.dlab.dto.exploratory.ExploratoryCreateDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryGitCredsDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryGitCredsUpdateDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryImageDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryReconfigureSparkClusterActionDTO;
+import com.epam.dlab.dto.exploratory.LibInstallDTO;
+import com.epam.dlab.dto.exploratory.LibraryInstallDTO;
 import com.epam.dlab.dto.gcp.GcpCloudSettings;
 import com.epam.dlab.dto.gcp.computational.ComputationalCreateGcp;
 import com.epam.dlab.dto.gcp.computational.GcpComputationalTerminateDTO;
 import com.epam.dlab.dto.gcp.computational.SparkComputationalCreateGcp;
-import com.epam.dlab.dto.gcp.edge.EdgeCreateGcp;
 import com.epam.dlab.dto.gcp.exploratory.ExploratoryCreateGcp;
-import com.epam.dlab.dto.gcp.keyload.UploadFileGcp;
 import com.epam.dlab.dto.project.ProjectActionDTO;
 import com.epam.dlab.dto.project.ProjectCreateDTO;
-import com.epam.dlab.dto.reuploadkey.ReuploadKeyDTO;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.model.exploratory.Exploratory;
 import com.epam.dlab.util.UsernameUtils;
 import com.google.inject.Inject;
@@ -74,7 +82,9 @@
 import java.util.Map;
 import java.util.UUID;
 
-import static com.epam.dlab.cloud.CloudProvider.*;
+import static com.epam.dlab.cloud.CloudProvider.AWS;
+import static com.epam.dlab.cloud.CloudProvider.AZURE;
+import static com.epam.dlab.cloud.CloudProvider.GCP;
 
 @Singleton
 public class RequestBuilder {
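Everything below in this file follows one refactor: each builder method now receives the target EndpointDTO and derives the cloud provider from it, instead of reading a single provider from configuration. This is what lets one self-service instance drive endpoints on different clouds. The recurring shape, condensed from the hunks that follow:

    CloudProvider cloudProvider = endpointDTO.getCloudProvider();
    switch (cloudProvider) {
        case AWS:
        case GCP:
            // provider-specific DTO via newResourceSysBaseDTO(userInfo, cloudProvider, SomeDto.class)
            break;
        case AZURE:
            // Azure variant, optionally enriched with Data Lake settings
            break;
        default:
            throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
    }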
@@ -86,8 +96,8 @@
 	@Inject
 	private SettingsDAO settingsDAO;
 
-	private CloudSettings cloudSettings(UserInfo userInfo) {
-		switch (cloudProvider()) {
+	private CloudSettings cloudSettings(UserInfo userInfo, CloudProvider cloudProvider) {
+		switch (cloudProvider) {
 			case AWS:
 				return AwsCloudSettings.builder()
 						.awsIamUser(userInfo.getName())
@@ -99,32 +109,33 @@
 				return GcpCloudSettings.builder()
 						.gcpIamUser(userInfo.getName()).build();
 			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 	}
 
 	@SuppressWarnings("unchecked")
-	private <T extends ResourceBaseDTO<?>> T newResourceBaseDTO(UserInfo userInfo, Class<T> resourceClass) {
+	private <T extends ResourceBaseDTO<?>> T newResourceBaseDTO(UserInfo userInfo, CloudProvider cloudProvider,
+																Class<T> resourceClass) {
 		try {
 			return (T) resourceClass.newInstance()
-					.withEdgeUserName(getEdgeUserName(userInfo))
-					.withCloudSettings(cloudSettings(userInfo));
+					.withEdgeUserName(getEdgeUserName(userInfo, cloudProvider))
+					.withCloudSettings(cloudSettings(userInfo, cloudProvider));
 		} catch (Exception e) {
 			throw new DlabException("Cannot create instance of resource class " + resourceClass.getName() + ". " +
 					e.getLocalizedMessage(), e);
 		}
 	}
 
-	private String getEdgeUserName(UserInfo userInfo) {
+	private String getEdgeUserName(UserInfo userInfo, CloudProvider cloudProvider) {
 		String edgeUser = UsernameUtils.replaceWhitespaces(userInfo.getSimpleName());
-		switch (cloudProvider()) {
+		switch (cloudProvider) {
 			case GCP:
 				return adjustUserName(configuration.getMaxUserNameLength(), edgeUser);
 			case AWS:
 			case AZURE:
 				return edgeUser;
 			default:
-				throw new DlabException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new DlabException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 	}
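Side note on newResourceBaseDTO above: Class.newInstance() has been deprecated since Java 9 because it propagates checked exceptions unwrapped and requires a visible no-arg constructor. An equivalent call, assuming the DTO classes keep public no-arg constructors, would be:

    // drop-in replacement sketch for resourceClass.newInstance()
    T dto = resourceClass.getDeclaredConstructor().newInstance();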
 
@@ -134,71 +145,26 @@
 	}
 
 	@SuppressWarnings("unchecked")
-	private <T extends ResourceSysBaseDTO<?>> T newResourceSysBaseDTO(UserInfo userInfo, Class<T> resourceClass) {
-		return newResourceBaseDTO(userInfo, resourceClass);
+	private <T extends ResourceSysBaseDTO<?>> T newResourceSysBaseDTO(UserInfo userInfo, CloudProvider cloudProvider,
+																	  Class<T> resourceClass) {
+		return newResourceBaseDTO(userInfo, cloudProvider, resourceClass);
 	}
 
 	@SuppressWarnings("unchecked")
-	public UploadFile newEdgeKeyUpload(UserInfo userInfo, String content) {
-
-		switch (cloudProvider()) {
-			case AWS:
-				EdgeCreateAws edgeCreateAws = newResourceSysBaseDTO(userInfo, EdgeCreateAws.class);
-				UploadFileAws uploadFileAws = new UploadFileAws();
-				uploadFileAws.setEdge(edgeCreateAws);
-				uploadFileAws.setContent(content);
-
-				return uploadFileAws;
-
-			case AZURE:
-				EdgeCreateAzure edgeCreateAzure = newResourceSysBaseDTO(userInfo, EdgeCreateAzure.class)
-						.withAzureDataLakeEnable(Boolean.toString(settingsDAO.isAzureDataLakeEnabled()));
-
-				UploadFileAzure uploadFileAzure = new UploadFileAzure();
-				uploadFileAzure.setEdge(edgeCreateAzure);
-				uploadFileAzure.setContent(content);
-
-				return uploadFileAzure;
-
-			case GCP:
-				return new UploadFileGcp(newResourceSysBaseDTO(userInfo, EdgeCreateGcp.class), content);
-			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
-		}
-	}
-
-	public ReuploadKeyDTO newKeyReupload(UserInfo userInfo, String id, String content, List<ResourceData> resources) {
-		return newResourceSysBaseDTO(userInfo, ReuploadKeyDTO.class)
-				.withId(id)
-				.withContent(content)
-				.withResources(resources);
-	}
-
-	@SuppressWarnings("unchecked")
-	public <T extends ResourceSysBaseDTO<?>> T newEdgeAction(UserInfo userInfo) {
-		checkInappropriateCloudProviderOrElseThrowException();
-		return (T) newResourceSysBaseDTO(userInfo, ResourceSysBaseDTO.class);
-	}
-
-	public UserEnvironmentResources newUserEnvironmentStatus(UserInfo userInfo) {
-		checkInappropriateCloudProviderOrElseThrowException();
-		return newResourceSysBaseDTO(userInfo, UserEnvironmentResources.class);
-	}
-
-	@SuppressWarnings("unchecked")
-	public <T extends ExploratoryCreateDTO<T>> T newExploratoryCreate(Exploratory exploratory, UserInfo userInfo,
+	public <T extends ExploratoryCreateDTO<T>> T newExploratoryCreate(ProjectDTO projectDTO, EndpointDTO endpointDTO, Exploratory exploratory,
+																	  UserInfo userInfo,
 																	  ExploratoryGitCredsDTO exploratoryGitCredsDTO,
 																	  Map<String, String> tags) {
 
 		T exploratoryCreate;
-
-		switch (cloudProvider()) {
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
+		switch (cloudProvider) {
 			case AWS:
-				exploratoryCreate = (T) newResourceSysBaseDTO(userInfo, ExploratoryCreateAws.class)
+				exploratoryCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryCreateAws.class)
 						.withNotebookInstanceType(exploratory.getShape());
 				break;
 			case AZURE:
-				exploratoryCreate = (T) newResourceSysBaseDTO(userInfo, ExploratoryCreateAzure.class)
+				exploratoryCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryCreateAzure.class)
 						.withNotebookInstanceSize(exploratory.getShape());
 				if (settingsDAO.isAzureDataLakeEnabled()) {
 					((ExploratoryCreateAzure) exploratoryCreate)
@@ -209,11 +175,11 @@
 						.withAzureDataLakeEnabled(Boolean.toString(settingsDAO.isAzureDataLakeEnabled()));
 				break;
 			case GCP:
-				exploratoryCreate = (T) newResourceSysBaseDTO(userInfo, ExploratoryCreateGcp.class)
+				exploratoryCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryCreateGcp.class)
 						.withNotebookInstanceType(exploratory.getShape());
 				break;
 			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 
 		return exploratoryCreate.withExploratoryName(exploratory.getName())
@@ -224,19 +190,21 @@
 				.withClusterConfig(exploratory.getClusterConfig())
 				.withProject(exploratory.getProject())
 				.withEndpoint(exploratory.getEndpoint())
+				.withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()))
 				.withTags(tags);
 	}
 
 	@SuppressWarnings("unchecked")
 	public <T extends ExploratoryGitCredsUpdateDTO> T newExploratoryStart(UserInfo userInfo,
 																		  UserInstanceDTO userInstance,
+																		  EndpointDTO endpointDTO,
 																		  ExploratoryGitCredsDTO
 																				  exploratoryGitCredsDTO) {
-
-		switch (cloudProvider()) {
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
+		switch (cloudProvider) {
 			case AWS:
 			case GCP:
-				return (T) newResourceSysBaseDTO(userInfo, ExploratoryGitCredsUpdateDTO.class)
+				return (T) newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryGitCredsUpdateDTO.class)
 						.withNotebookInstanceName(userInstance.getExploratoryId())
 						.withGitCreds(exploratoryGitCredsDTO.getGitCreds())
 						.withNotebookImage(userInstance.getImageName())
@@ -245,7 +213,7 @@
 						.withProject(userInstance.getProject())
 						.withEndpoint(userInstance.getEndpoint());
 			case AZURE:
-				T exploratoryStart = (T) newResourceSysBaseDTO(userInfo, ExploratoryActionStartAzure.class)
+				T exploratoryStart = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryActionStartAzure.class)
 						.withNotebookInstanceName(userInstance.getExploratoryId())
 						.withGitCreds(exploratoryGitCredsDTO.getGitCreds())
 						.withNotebookImage(userInstance.getImageName())
@@ -264,25 +232,26 @@
 
 				return exploratoryStart;
 			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 	}
 
 	@SuppressWarnings("unchecked")
-	public <T extends ExploratoryActionDTO<T>> T newExploratoryStop(UserInfo userInfo, UserInstanceDTO userInstance) {
+	public <T extends ExploratoryActionDTO<T>> T newExploratoryStop(UserInfo userInfo, UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
 
 		T exploratoryStop;
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
 
-		switch (cloudProvider()) {
+		switch (cloudProvider) {
 			case AWS:
 			case GCP:
-				exploratoryStop = (T) newResourceSysBaseDTO(userInfo, ExploratoryActionDTO.class);
+				exploratoryStop = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryActionDTO.class);
 				break;
 			case AZURE:
-				exploratoryStop = (T) newResourceSysBaseDTO(userInfo, ExploratoryActionStopAzure.class);
+				exploratoryStop = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryActionStopAzure.class);
 				break;
 			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 
 		return exploratoryStop
@@ -296,9 +265,10 @@
 	}
 
 	public ExploratoryGitCredsUpdateDTO newGitCredentialsUpdate(UserInfo userInfo, UserInstanceDTO instanceDTO,
+																EndpointDTO endpointDTO,
 																ExploratoryGitCredsDTO exploratoryGitCredsDTO) {
-		checkInappropriateCloudProviderOrElseThrowException();
-		return newResourceSysBaseDTO(userInfo, ExploratoryGitCredsUpdateDTO.class)
+		checkInappropriateCloudProviderOrElseThrowException(endpointDTO.getCloudProvider());
+		return newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ExploratoryGitCredsUpdateDTO.class)
 				.withNotebookImage(instanceDTO.getImageName())
 				.withApplicationName(getApplicationNameFromImage(instanceDTO.getImageName()))
 				.withProject(instanceDTO.getProject())
@@ -309,24 +279,27 @@
 	}
 
 	public LibraryInstallDTO newLibInstall(UserInfo userInfo, UserInstanceDTO userInstance,
-										   List<LibInstallDTO> libs) {
-		checkInappropriateCloudProviderOrElseThrowException();
-		return newResourceSysBaseDTO(userInfo, LibraryInstallDTO.class)
+										   EndpointDTO endpointDTO, List<LibInstallDTO> libs) {
+		checkInappropriateCloudProviderOrElseThrowException(endpointDTO.getCloudProvider());
+		return newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), LibraryInstallDTO.class)
 				.withNotebookImage(userInstance.getImageName())
 				.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()))
 				.withNotebookInstanceName(userInstance.getExploratoryId())
 				.withExploratoryName(userInstance.getExploratoryName())
 				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName())
 				.withLibs(libs);
 	}
 
 	@SuppressWarnings("unchecked")
 	public <T extends ExploratoryActionDTO<T>> T newLibExploratoryList(UserInfo userInfo,
-																	   UserInstanceDTO userInstance) {
-		checkInappropriateCloudProviderOrElseThrowException();
-		return (T) newResourceSysBaseDTO(userInfo, ExploratoryActionDTO.class)
+																	   UserInstanceDTO userInstance,
+																	   EndpointDTO endpointDTO) {
+		checkInappropriateCloudProviderOrElseThrowException(endpointDTO.getCloudProvider());
+		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ExploratoryActionDTO.class)
 				.withNotebookInstanceName(userInstance.getExploratoryId())
 				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName())
 				.withNotebookImage(userInstance.getImageName())
 				.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()))
 				.withExploratoryName(userInstance.getExploratoryName());
@@ -335,13 +308,14 @@
 	@SuppressWarnings("unchecked")
 	public <T extends LibraryInstallDTO> T newLibInstall(UserInfo userInfo, UserInstanceDTO userInstance,
 														 UserComputationalResource computationalResource,
-														 List<LibInstallDTO> libs) {
-		checkInappropriateCloudProviderOrElseThrowException();
-		return (T) newResourceSysBaseDTO(userInfo, LibraryInstallDTO.class)
+														 List<LibInstallDTO> libs, EndpointDTO endpointDTO) {
+		checkInappropriateCloudProviderOrElseThrowException(endpointDTO.getCloudProvider());
+		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), LibraryInstallDTO.class)
 				.withComputationalId(computationalResource.getComputationalId())
 				.withComputationalName(computationalResource.getComputationalName())
 				.withExploratoryName(userInstance.getExploratoryName())
 				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName())
 				.withComputationalImage(computationalResource.getImageName())
 				.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()))
 				.withLibs(libs);
@@ -351,50 +325,55 @@
 	public <T extends LibListComputationalDTO> T newLibComputationalList(UserInfo userInfo,
 																		 UserInstanceDTO userInstance,
 																		 UserComputationalResource
-																				 computationalResource) {
+																				 computationalResource,
+																		 EndpointDTO endpointDTO) {
 
-		checkInappropriateCloudProviderOrElseThrowException();
-		return (T) newResourceSysBaseDTO(userInfo, LibListComputationalDTO.class)
+		checkInappropriateCloudProviderOrElseThrowException(endpointDTO.getCloudProvider());
+		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), LibListComputationalDTO.class)
 				.withComputationalId(computationalResource.getComputationalId())
 				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName())
 				.withComputationalImage(computationalResource.getImageName())
 				.withLibCacheKey(ExploratoryLibCache.libraryCacheKey(userInstance))
 				.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()));
 	}
 
 	@SuppressWarnings("unchecked")
-	public <T extends ComputationalBase<T>> T newComputationalCreate(UserInfo userInfo,
+	public <T extends ComputationalBase<T>> T newComputationalCreate(UserInfo userInfo, ProjectDTO projectDTO,
 																	 UserInstanceDTO userInstance,
-																	 ComputationalCreateFormDTO form) {
+																	 ComputationalCreateFormDTO form,
+																	 EndpointDTO endpointDTO) {
 		T computationalCreate;
-
-		switch (cloudProvider()) {
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
+		switch (cloudProvider) {
 			case AZURE:
 				throw new UnsupportedOperationException("Creating dataengine service is not supported yet");
 			case AWS:
 				AwsComputationalCreateForm awsForm = (AwsComputationalCreateForm) form;
-				computationalCreate = (T) newResourceSysBaseDTO(userInfo, ComputationalCreateAws.class)
+				computationalCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ComputationalCreateAws.class)
 						.withInstanceCount(awsForm.getInstanceCount())
 						.withMasterInstanceType(awsForm.getMasterInstanceType())
 						.withSlaveInstanceType(awsForm.getSlaveInstanceType())
 						.withSlaveInstanceSpot(awsForm.getSlaveInstanceSpot())
 						.withSlaveInstanceSpotPctPrice(awsForm.getSlaveInstanceSpotPctPrice())
 						.withVersion(awsForm.getVersion())
-						.withConfig((awsForm.getConfig()));
+						.withConfig((awsForm.getConfig()))
+						.withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()));
 				break;
 			case GCP:
 				GcpComputationalCreateForm gcpForm = (GcpComputationalCreateForm) form;
-				computationalCreate = (T) newResourceSysBaseDTO(userInfo, ComputationalCreateGcp.class)
+				computationalCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ComputationalCreateGcp.class)
 						.withMasterInstanceCount(gcpForm.getMasterInstanceCount())
 						.withSlaveInstanceCount(gcpForm.getSlaveInstanceCount())
 						.withPreemptibleCount(gcpForm.getPreemptibleCount())
 						.withMasterInstanceType(gcpForm.getMasterInstanceType())
 						.withSlaveInstanceType(gcpForm.getSlaveInstanceType())
-						.withVersion(gcpForm.getVersion());
+						.withVersion(gcpForm.getVersion())
+						.withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()));
 				break;
 
 			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 
 		return computationalCreate
@@ -409,26 +388,29 @@
 	}
 
 	@SuppressWarnings("unchecked")
-	public <T extends ComputationalBase<T>> T newComputationalCreate(UserInfo userInfo,
+	public <T extends ComputationalBase<T>> T newComputationalCreate(UserInfo userInfo, ProjectDTO projectDTO,
 																	 UserInstanceDTO userInstance,
-																	 SparkStandaloneClusterCreateForm form) {
+																	 SparkStandaloneClusterCreateForm form,
+																	 EndpointDTO endpointDTO) {
 
 		T computationalCreate;
-
-		switch (cloudProvider()) {
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
+		switch (cloudProvider) {
 			case AWS:
-				computationalCreate = (T) newResourceSysBaseDTO(userInfo, SparkComputationalCreateAws.class)
+				computationalCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, SparkComputationalCreateAws.class)
 						.withDataEngineInstanceCount(form.getDataEngineInstanceCount())
 						.withDataEngineMasterShape(form.getDataEngineInstanceShape())
 						.withDataEngineSlaveShape(form.getDataEngineInstanceShape())
-						.withConfig(form.getConfig());
+						.withConfig(form.getConfig())
+						.withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()));
 				break;
 			case AZURE:
-				computationalCreate = (T) newResourceSysBaseDTO(userInfo, SparkComputationalCreateAzure.class)
+				computationalCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, SparkComputationalCreateAzure.class)
 						.withDataEngineInstanceCount(form.getDataEngineInstanceCount())
 						.withDataEngineMasterSize(form.getDataEngineInstanceShape())
 						.withDataEngineSlaveSize(form.getDataEngineInstanceShape())
-						.withConfig(form.getConfig());
+						.withConfig(form.getConfig())
+						.withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()));
 				if (settingsDAO.isAzureDataLakeEnabled()) {
 					((SparkComputationalCreateAzure) computationalCreate)
 							.withAzureUserRefreshToken(userInfo.getKeys().get(AZURE_REFRESH_TOKEN_KEY));
@@ -439,14 +421,15 @@
 
 				break;
 			case GCP:
-				computationalCreate = (T) newResourceSysBaseDTO(userInfo, SparkComputationalCreateGcp.class)
+				computationalCreate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, SparkComputationalCreateGcp.class)
 						.withDataEngineInstanceCount(form.getDataEngineInstanceCount())
 						.withDataEngineMasterSize(form.getDataEngineInstanceShape())
 						.withDataEngineSlaveSize(form.getDataEngineInstanceShape())
-						.withConfig(form.getConfig());
+						.withConfig(form.getConfig())
+						.withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()));
 				break;
 			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 
 		return computationalCreate
@@ -463,12 +446,13 @@
 	@SuppressWarnings("unchecked")
 	public <T extends ComputationalBase<T>> T newComputationalTerminate(UserInfo userInfo,
 																		UserInstanceDTO userInstanceDTO,
-																		UserComputationalResource computationalResource) {
+																		UserComputationalResource computationalResource,
+																		EndpointDTO endpointDTO) {
 		T computationalTerminate;
-
-		switch (cloudProvider()) {
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
+		switch (cloudProvider) {
 			case AWS:
-				AwsComputationalTerminateDTO terminateDTO = newResourceSysBaseDTO(userInfo,
+				AwsComputationalTerminateDTO terminateDTO = newResourceSysBaseDTO(userInfo, cloudProvider,
 						AwsComputationalTerminateDTO.class);
 				if (computationalResource.getDataEngineType() == DataEngineType.CLOUD_SERVICE) {
 					terminateDTO.setClusterName(computationalResource.getComputationalId());
@@ -476,10 +460,10 @@
 				computationalTerminate = (T) terminateDTO;
 				break;
 			case AZURE:
-				computationalTerminate = (T) newResourceSysBaseDTO(userInfo, ComputationalTerminateDTO.class);
+				computationalTerminate = (T) newResourceSysBaseDTO(userInfo, cloudProvider, ComputationalTerminateDTO.class);
 				break;
 			case GCP:
-				GcpComputationalTerminateDTO gcpTerminateDTO = newResourceSysBaseDTO(userInfo,
+				GcpComputationalTerminateDTO gcpTerminateDTO = newResourceSysBaseDTO(userInfo, cloudProvider,
 						GcpComputationalTerminateDTO.class);
 				if (computationalResource.getDataEngineType() == DataEngineType.CLOUD_SERVICE) {
 					gcpTerminateDTO.setClusterName(computationalResource.getComputationalId());
@@ -488,7 +472,7 @@
 				break;
 
 			default:
-				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider());
+				throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + cloudProvider);
 		}
 
 		return computationalTerminate
@@ -500,33 +484,34 @@
 	}
 
 	@SuppressWarnings("unchecked")
-	public <T extends ComputationalBase<T>> T newComputationalStop(UserInfo userInfo,
-																   UserInstanceDTO exploratory,
-																   String computationalName) {
-		return (T) newResourceSysBaseDTO(userInfo, ComputationalStopDTO.class)
+	public <T extends ComputationalBase<T>> T newComputationalStop(UserInfo userInfo, UserInstanceDTO exploratory,
+																   String computationalName, EndpointDTO endpointDTO) {
+		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ComputationalStopDTO.class)
 				.withExploratoryName(exploratory.getExploratoryName())
 				.withComputationalName(computationalName)
 				.withNotebookInstanceName(exploratory.getExploratoryId())
 				.withApplicationName(getApplicationNameFromImage(exploratory.getImageName()))
-				.withProject(exploratory.getProject());
+				.withProject(exploratory.getProject())
+				.withEndpoint(endpointDTO.getName());
 	}
 
 	@SuppressWarnings("unchecked")
 	public <T extends ComputationalBase<T>> T newComputationalStart(UserInfo userInfo, UserInstanceDTO exploratory,
-																	String computationalName) {
-		return (T) newResourceSysBaseDTO(userInfo, ComputationalStartDTO.class)
+																	String computationalName, EndpointDTO endpointDTO) {
+		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ComputationalStartDTO.class)
 				.withExploratoryName(exploratory.getExploratoryName())
 				.withComputationalName(computationalName)
 				.withNotebookInstanceName(exploratory.getExploratoryId())
 				.withApplicationName(getApplicationNameFromImage(exploratory.getImageName()))
-				.withProject(exploratory.getProject());
+				.withProject(exploratory.getProject())
+				.withEndpoint(endpointDTO.getName());
 	}
 
 	@SuppressWarnings("unchecked")
 	public <T extends ExploratoryImageDTO> T newExploratoryImageCreate(UserInfo userInfo, UserInstanceDTO userInstance,
-																	   String imageName) {
-		checkInappropriateCloudProviderOrElseThrowException();
-		return (T) newResourceSysBaseDTO(userInfo, ExploratoryImageDTO.class)
+																	   String imageName, EndpointDTO endpointDTO, ProjectDTO projectDTO) {
+		checkInappropriateCloudProviderOrElseThrowException(endpointDTO.getCloudProvider());
+		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ExploratoryImageDTO.class)
 				.withProject(userInstance.getProject())
 				.withNotebookInstanceName(userInstance.getExploratoryId())
 				.withExploratoryName(userInstance.getExploratoryName())
@@ -534,14 +519,15 @@
 				.withNotebookImage(userInstance.getImageName())
 				.withImageName(imageName)
 				.withEndpoint(userInstance.getEndpoint())
-				.withTags(userInstance.getTags());
+				.withTags(userInstance.getTags())
+				.withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()));
 	}
 
 	@SuppressWarnings("unchecked")
 	public <T extends ComputationalBase<T>> T newComputationalCheckInactivity(UserInfo userInfo,
 																			  UserInstanceDTO exploratory,
-																			  UserComputationalResource cr) {
-		return (T) newResourceSysBaseDTO(userInfo, ComputationalCheckInactivityDTO.class)
+																			  UserComputationalResource cr, EndpointDTO endpointDTO) {
+		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ComputationalCheckInactivityDTO.class)
 				.withExploratoryName(exploratory.getExploratoryName())
 				.withComputationalName(cr.getComputationalName())
 				.withNotebookInstanceName(exploratory.getExploratoryId())
@@ -549,7 +535,8 @@
 				.withNotebookImageName(exploratory.getImageName())
 				.withImage(cr.getImageName())
 				.withComputationalId(cr.getComputationalId())
-				.withProject(exploratory.getProject());
+				.withProject(exploratory.getProject())
+				.withEndpoint(endpointDTO.getName());
 	}
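As with the stop/start builders above, inactivity-check requests now carry both the project and the endpoint name so the provisioning side can route them. Illustrative call (values are placeholders):

    ComputationalCheckInactivityDTO dto =
            requestBuilder.newComputationalCheckInactivity(userInfo, exploratory, sparkCluster, endpointDTO);
    // dto now carries exploratory.getProject() and endpointDTO.getName()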
 
 
@@ -569,8 +556,9 @@
 
 	public ComputationalClusterConfigDTO newClusterConfigUpdate(UserInfo userInfo, UserInstanceDTO userInstanceDTO,
 																UserComputationalResource compRes,
-																List<ClusterConfig> config) {
-		final ComputationalClusterConfigDTO clusterConfigDTO = newResourceSysBaseDTO(userInfo,
+																List<ClusterConfig> config, EndpointDTO endpointDTO) {
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
+		final ComputationalClusterConfigDTO clusterConfigDTO = newResourceSysBaseDTO(userInfo, cloudProvider,
 				ComputationalClusterConfigDTO.class)
 				.withExploratoryName(userInstanceDTO.getExploratoryName())
 				.withNotebookInstanceName(userInstanceDTO.getExploratoryId())
@@ -580,7 +568,7 @@
 				.withEndpoint(userInstanceDTO.getEndpoint());
 		clusterConfigDTO.setCopmutationalId(compRes.getComputationalId());
 		clusterConfigDTO.setConfig(config);
-		if (cloudProvider() == AZURE && settingsDAO.isAzureDataLakeEnabled()) {
+		if (cloudProvider == AZURE && settingsDAO.isAzureDataLakeEnabled()) {
 			clusterConfigDTO.setAzureUserRefreshToken(userInfo.getKeys().get(AZURE_REFRESH_TOKEN_KEY));
 		}
 
@@ -589,10 +577,12 @@
 
 	public ExploratoryReconfigureSparkClusterActionDTO newClusterConfigUpdate(UserInfo userInfo,
 																			  UserInstanceDTO userInstance,
-																			  List<ClusterConfig> config) {
+																			  List<ClusterConfig> config,
+																			  EndpointDTO endpointDTO) {
 
+		CloudProvider cloudProvider = endpointDTO.getCloudProvider();
 		final ExploratoryReconfigureSparkClusterActionDTO dto =
-				newResourceSysBaseDTO(userInfo, ExploratoryReconfigureSparkClusterActionDTO.class)
+				newResourceSysBaseDTO(userInfo, cloudProvider, ExploratoryReconfigureSparkClusterActionDTO.class)
 						.withNotebookInstanceName(userInstance.getExploratoryId())
 						.withExploratoryName(userInstance.getExploratoryName())
 						.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()))
@@ -600,7 +590,7 @@
 						.withConfig(config)
 						.withProject(userInstance.getProject())
 						.withEndpoint(userInstance.getEndpoint());
-		if (cloudProvider() == AZURE && settingsDAO.isAzureDataLakeEnabled()) {
+		if (cloudProvider == AZURE && settingsDAO.isAzureDataLakeEnabled()) {
 			dto.withAzureUserRefreshToken(userInfo.getKeys().get(AZURE_REFRESH_TOKEN_KEY));
 		}
 
@@ -608,35 +598,32 @@
 	}
 
 	public ExploratoryCheckInactivityAction newExploratoryCheckInactivityAction(UserInfo userInfo,
-																				UserInstanceDTO userInstance) {
-		final ExploratoryCheckInactivityAction dto = newResourceSysBaseDTO(userInfo,
+																				UserInstanceDTO userInstance,
+																				EndpointDTO endpointDTO) {
+		final ExploratoryCheckInactivityAction dto = newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(),
 				ExploratoryCheckInactivityAction.class);
 		dto.withNotebookInstanceName(userInstance.getExploratoryId())
 				.withNotebookImage(userInstance.getImageName())
 				.withExploratoryName(userInstance.getExploratoryName())
 				.withReuploadKeyRequired(userInstance.isReuploadKeyRequired())
-				.withProject(userInstance.getProject());
+				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName());
 		return dto;
 	}
 
-	public ProjectCreateDTO newProjectCreate(UserInfo userInfo, ProjectDTO projectDTO, String endpoint) {
+	public ProjectCreateDTO newProjectCreate(UserInfo userInfo, ProjectDTO projectDTO, EndpointDTO endpointDTO) {
 		return ProjectCreateDTO.builder()
 				.key(projectDTO.getKey().replace("\n", ""))
 				.name(projectDTO.getName())
 				.tag(projectDTO.getTag())
-				.endpoint(endpoint)
-				.useSharedImage(String.valueOf(projectDTO.isUseSharedImage()))
+				.endpoint(endpointDTO.getName())
 				.build()
-				.withCloudSettings(cloudSettings(userInfo));
+				.withCloudSettings(cloudSettings(userInfo, endpointDTO.getCloudProvider()));
 	}
 
-	public ProjectActionDTO newProjectAction(UserInfo userInfo, String project, String endpoint) {
-		return new ProjectActionDTO(project, endpoint)
-				.withCloudSettings(cloudSettings(userInfo));
-	}
-
-	private CloudProvider cloudProvider() {
-		return configuration.getCloudProvider();
+	public ProjectActionDTO newProjectAction(UserInfo userInfo, String project, EndpointDTO endpointDTO) {
+		return new ProjectActionDTO(project, endpointDTO.getName())
+				.withCloudSettings(cloudSettings(userInfo, endpointDTO.getCloudProvider()));
 	}
 
 	/**
@@ -655,8 +642,7 @@
 		return "";
 	}
 
-	private void checkInappropriateCloudProviderOrElseThrowException() {
-		CloudProvider provider = cloudProvider();
+	private void checkInappropriateCloudProviderOrElseThrowException(CloudProvider provider) {
 		if (provider != AWS && provider != AZURE && provider != GCP) {
 			throw new IllegalArgumentException(UNSUPPORTED_CLOUD_PROVIDER_MESSAGE + provider);
 		}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/validation/SelfServiceCloudConfigurationSequenceProvider.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/validation/SelfServiceCloudConfigurationSequenceProvider.java
deleted file mode 100644
index 1fd1748..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/validation/SelfServiceCloudConfigurationSequenceProvider.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.validation;
-
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.validation.CloudConfigurationSequenceProvider;
-
-public class SelfServiceCloudConfigurationSequenceProvider
-        extends CloudConfigurationSequenceProvider<SelfServiceApplicationConfiguration> {
-
-}
diff --git a/services/self-service/src/main/resources/mongo/aws/mongo_roles.json b/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
index 70eb16e..6a8fd29 100644
--- a/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
@@ -2,6 +2,8 @@
   {
     "_id": "nbShapes_p2.xlarge_fetching",
     "description": "Use p2.xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "p2.xlarge"
     ],
@@ -12,6 +14,8 @@
   {
     "_id": "nbShapes_t2.medium_fetching",
     "description": "Use t2.medium instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "t2.medium"
     ],
@@ -22,6 +26,8 @@
   {
     "_id": "nbShapes_r3.xlarge_fetching",
     "description": "Use r3.xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r3.xlarge"
     ],
@@ -32,6 +38,8 @@
   {
     "_id": "nbShapes_r4.2xlarge_fetching",
     "description": "Use r4.2xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r4.2xlarge"
     ],
@@ -42,6 +50,8 @@
   {
     "_id": "nbShapes_r3.4xlarge_fetching",
     "description": "Use r3.4xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r3.4xlarge"
     ],
@@ -52,6 +62,8 @@
   {
     "_id": "nbShapes_r3.8xlarge_fetching",
     "description": "Use r3.8xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r3.8xlarge"
     ],
@@ -62,6 +74,8 @@
   {
     "_id": "nbShapes_c4.large_fetching",
     "description": "Use c4.large instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "c4.large"
     ],
@@ -72,6 +86,8 @@
   {
     "_id": "nbShapes_c4.2xlarge_fetching",
     "description": "Use c4.2xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "c4.2xlarge"
     ],
@@ -82,6 +98,8 @@
   {
     "_id": "nbShapes_c4.8xlarge_fetching",
     "description": "Use c4.8xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "c4.8xlarge"
     ],
@@ -92,6 +110,8 @@
   {
     "_id": "nbCreateDeeplearning",
     "description": "Create Notebook Deep Learning",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-deeplearning"
     ],
@@ -102,6 +122,8 @@
   {
     "_id": "nbCreateJupyter",
     "description": "Create Notebook Jupyter",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-jupyter"
     ],
@@ -110,8 +132,22 @@
     ]
   },
   {
+    "_id": "nbCreateJupyterLab",
+    "description": "Create Notebook JupyterLab",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
+    "exploratories": [
+      "docker.dlab-jupyterlab"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-rstudio"
     ],
@@ -122,6 +158,8 @@
   {
     "_id": "nbCreateTensor",
     "description": "Create Notebook Jupyter with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-tensor"
     ],
@@ -132,6 +170,8 @@
   {
     "_id": "nbCreateZeppelin",
     "description": "Create Notebook Apache Zeppelin",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-zeppelin"
     ],
@@ -142,6 +182,8 @@
   {
     "_id": "nbCreateTensorRstudio",
     "description": "Create Notebook RStudio with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-tensor-rstudio"
     ],
@@ -152,6 +194,8 @@
   {
     "_id": "nbCreateDataEngine",
     "description": "Create Data Engine",
+    "type": "COMPUTATIONAL",
+    "cloud": "AWS",
     "computationals": [
       "docker.dlab-dataengine"
     ],
@@ -162,6 +206,8 @@
   {
     "_id": "nbCreateDataEngineService",
     "description": "Create Data Engine Service",
+    "type": "COMPUTATIONAL",
+    "cloud": "AWS",
     "computationals": [
       "docker.dlab-dataengine-service"
     ],
@@ -172,6 +218,8 @@
   {
     "_id": "compShapes_c4.xlarge_fetching",
     "description": "Use c4.xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "c4.xlarge"
     ],
@@ -182,6 +230,8 @@
   {
     "_id": "compShapes_r3.xlarge_fetching",
     "description": "Use r3.xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r3.xlarge"
     ],
@@ -192,6 +242,8 @@
   {
     "_id": "compShapes_r4.2xlarge_fetching",
     "description": "Use r4.2xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r4.2xlarge"
     ],
@@ -202,6 +254,8 @@
   {
     "_id": "compShapes_r3.4xlarge_fetching",
     "description": "Use r3.4xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r3.4xlarge"
     ],
@@ -212,6 +266,8 @@
   {
     "_id": "compShapes_r3.8xlarge_fetching",
     "description": "Use r3.8xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r3.8xlarge"
     ],
@@ -222,6 +278,8 @@
   {
     "_id": "compShapes_c4.2xlarge_fetching",
     "description": "Use c4.2xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "c4.2xlarge"
     ],
@@ -232,6 +290,8 @@
   {
     "_id": "compShapes_c4.8xlarge_fetching",
     "description": "Use c4.8xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "c4.8xlarge"
     ],
@@ -242,6 +302,8 @@
   {
     "_id": "compShapes_p2.xlarge_fetching",
     "description": "Use p2.xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "p2.xlarge"
     ],
@@ -252,6 +314,8 @@
   {
     "_id": "nbBillingReportFull",
     "description": "View full billing report for all users",
+    "type": "BILLING",
+    "cloud": "AWS",
     "pages": [
       "/api/infrastructure_provision/billing"
     ],
@@ -260,12 +324,33 @@
     ]
   },
   {
+    "_id": "projectAdmin",
+    "description": "Allow to execute administration operation per project",
+    "type": "ADMINISTRATION",
+    "cloud": "AWS",
+    "pages": [
+      "environment/*",
+      "/roleManagement",
+      "/api/settings",
+      "/user/settings",
+      "/api/project",
+      "/api/endpoint"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "admin",
     "description": "Allow to execute administration operation",
+    "type": "ADMINISTRATION",
+    "cloud": "AWS",
     "pages": [
       "environment/*",
       "/api/infrastructure/backup",
       "/roleManagement",
+      "/roleManagement/create",
+      "/roleManagement/delete",
       "/api/settings",
       "/user/settings",
       "/api/project",
diff --git a/services/self-service/src/main/resources/mongo/azure/mongo_roles.json b/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
index 58cadb3..86eadff 100644
--- a/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
@@ -1,7 +1,9 @@
 [
   {
     "_id": "nbShapes_Standard_NC6_fetching",
-    "description": "Allow to use Standard_NC6 instance shape for notebook",
+    "description": "Use Standard_NC6 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_NC6"
     ],
@@ -12,6 +14,8 @@
   {
     "_id": "nbShapes_Standard_E4s_v3_fetching",
     "description": "Use Standard_E4s_v3 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_E4s_v3"
     ],
@@ -22,6 +26,8 @@
   {
     "_id": "nbShapes_Standard_E16s_v3_fetching",
     "description": "Use Standard_E16s_v3 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_E16s_v3"
     ],
@@ -32,6 +38,8 @@
   {
     "_id": "nbShapes_Standard_E32s_v3_fetching",
     "description": "Use Standard_E32s_v3 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_E32s_v3"
     ],
@@ -42,6 +50,8 @@
   {
     "_id": "nbShapes_Standard_F2s_fetching",
     "description": "Use Standard_F2s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F2s"
     ],
@@ -52,6 +62,8 @@
   {
     "_id": "nbShapes_Standard_F4s_fetching",
     "description": "Use Standard_F4s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F4s"
     ],
@@ -62,6 +74,8 @@
   {
     "_id": "nbShapes_Standard_F8s_fetching",
     "description": "Use Standard_F8s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F8s"
     ],
@@ -72,6 +86,8 @@
   {
     "_id": "nbShapes_Standard_F16s_fetching",
     "description": "Use Standard_F16s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F16s"
     ],
@@ -82,6 +98,8 @@
   {
     "_id": "nbCreateDeeplearning",
     "description": "Create Notebook Deep Learning",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-deeplearning"
     ],
@@ -92,6 +110,8 @@
   {
     "_id": "nbCreateJupyter",
     "description": "Create Notebook Jupyter",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-jupyter"
     ],
@@ -102,6 +122,8 @@
   {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-rstudio"
     ],
@@ -112,6 +134,8 @@
   {
     "_id": "nbCreateTensor",
     "description": "Create Notebook Jupyter with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-tensor"
     ],
@@ -122,6 +146,8 @@
   {
     "_id": "nbCreateZeppelin",
     "description": "Create Notebook Apache Zeppelin",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-zeppelin"
     ],
@@ -132,6 +158,8 @@
   {
     "_id": "nbCreateDataEngine",
     "description": "Create Data Engine",
+    "type": "COMPUTATIONAL",
+    "cloud": "AZURE",
     "computationals": [
       "docker.dlab-dataengine"
     ],
@@ -142,6 +170,8 @@
   {
     "_id": "compShapes_Standard_F4s_fetching",
     "description": "Use Standard_F4s instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_F4s"
     ],
@@ -152,6 +182,8 @@
   {
     "_id": "compShapes_Standard_E4s_v3_fetching",
     "description": "Use Standard_E4s_v3 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_E4s_v3"
     ],
@@ -162,6 +194,8 @@
   {
     "_id": "compShapes_Standard_E16s_v3_fetching",
     "description": "Use Standard_E16s_v3 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_E16s_v3"
     ],
@@ -172,6 +206,8 @@
   {
     "_id": "compShapes_Standard_E32s_v3_fetching",
     "description": "Use Standard_E32s_v3 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_E32s_v3"
     ],
@@ -182,6 +218,8 @@
   {
     "_id": "compShapes_Standard_F8s_fetching",
     "description": "Use Standard_F8s instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_F8s"
     ],
@@ -192,6 +230,8 @@
   {
     "_id": "compShapes_Standard_F16s_fetching",
     "description": "Use Standard_F16s instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_F16s"
     ],
@@ -202,6 +242,8 @@
   {
     "_id": "compShapes_Standard_NC6_fetching",
     "description": "Use Standard_NC6 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_NC6"
     ],
@@ -212,6 +254,8 @@
   {
     "_id": "nbBillingReportFull",
     "description": "View full billing report for all users",
+    "type": "BILLING",
+    "cloud": "AZURE",
     "pages": [
       "/api/infrastructure_provision/billing"
     ],
@@ -220,12 +264,33 @@
     ]
   },
   {
+    "_id": "projectAdmin",
+    "description": "Allow to execute administration operation per project",
+    "type": "ADMINISTRATION",
+    "cloud": "AZURE",
+    "pages": [
+      "environment/*",
+      "/roleManagement",
+      "/api/settings",
+      "/user/settings",
+      "/api/project",
+      "/api/endpoint"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "admin",
     "description": "Allow to execute administration operation",
+    "type": "ADMINISTRATION",
+    "cloud": "AZURE",
     "pages": [
       "environment/*",
       "/api/infrastructure/backup",
       "/roleManagement",
+      "/roleManagement/create",
+      "/roleManagement/delete",
       "/api/settings",
       "/user/settings",
       "/api/project",
diff --git a/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json b/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
index cf7b398..d2ef6dd 100644
--- a/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
@@ -2,6 +2,8 @@
   {
     "_id": "nbShapes_n1-highcpu-2_fetching",
     "description": "Use n1-highcpu-2 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highcpu-2"
     ],
@@ -12,6 +14,8 @@
   {
     "_id": "nbShapes_n1-highcpu-8_fetching",
     "description": "Use n1-highcpu-8 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highcpu-8"
     ],
@@ -22,6 +26,8 @@
   {
     "_id": "nbShapes_n1-highcpu-32_fetching",
     "description": "Use n1-highcpu-32 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highcpu-32"
     ],
@@ -32,6 +38,8 @@
   {
     "_id": "nbShapes_n1-highmem-4_fetching",
     "description": "Use n1-highmem-4 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highmem-4"
     ],
@@ -42,6 +50,8 @@
   {
     "_id": "nbShapes_n1-highmem-16_fetching",
     "description": "Use n1-highmem-16 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highmem-16"
     ],
@@ -52,6 +62,8 @@
   {
     "_id": "nbShapes_n1-highmem-32_fetching",
     "description": "Use n1-highmem-32 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highmem-32"
     ],
@@ -62,6 +74,8 @@
   {
     "_id": "nbShapes_n1-standard-2_fetching",
     "description": "Use n1-standard-2 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-standard-2"
     ],
@@ -72,6 +86,8 @@
   {
     "_id": "nbCreateDeeplearning",
     "description": "Create Notebook Deep Learning",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-deeplearning"
     ],
@@ -82,6 +98,8 @@
   {
     "_id": "nbCreateJupyter",
     "description": "Create Notebook Jupyter",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-jupyter"
     ],
@@ -90,8 +108,34 @@
     ]
   },
   {
+    "_id": "nbCreateJupyterLab",
+    "description": "Create Notebook JupyterLab",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
+    "exploratories": [
+      "docker.dlab-jupyterlab"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
+    "_id": "nbCreateSuperset",
+    "description": "Create Notebook Superset",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
+    "exploratories": [
+      "docker.dlab-superset"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-rstudio"
     ],
@@ -102,6 +146,8 @@
   {
     "_id": "nbCreateTensor",
     "description": "Create Notebook Jupyter with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-tensor"
     ],
@@ -112,6 +158,8 @@
   {
     "_id": "nbCreateTensorRstudio",
     "description": "Create Notebook RStudio with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-tensor-rstudio"
     ],
@@ -122,6 +170,8 @@
   {
     "_id": "nbCreateZeppelin",
     "description": "Create Notebook Apache Zeppelin",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-zeppelin"
     ],
@@ -132,6 +182,8 @@
   {
     "_id": "nbCreateDataEngine",
     "description": "Create Data Engine",
+    "type": "COMPUTATIONAL",
+    "cloud": "GCP",
     "computationals": [
       "docker.dlab-dataengine"
     ],
@@ -142,6 +194,8 @@
   {
     "_id": "nbCreateDataEngineService",
     "description": "Create Data Engine Service",
+    "type": "COMPUTATIONAL",
+    "cloud": "GCP",
     "computationals": [
       "docker.dlab-dataengine-service"
     ],
@@ -152,6 +206,8 @@
   {
     "_id": "compShapes_n1-standard-2_fetching",
     "description": "Use n1-standard-2 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-standard-2"
     ],
@@ -162,6 +218,8 @@
   {
     "_id": "compShapes_n1-highmem-4_fetching",
     "description": "Use n1-highmem-4 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highmem-4"
     ],
@@ -172,6 +230,8 @@
   {
     "_id": "compShapes_n1-highmem-16_fetching",
     "description": "Use n1-highmem-16 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highmem-16"
     ],
@@ -182,6 +242,8 @@
   {
     "_id": "compShapes_n1-highmem-32_fetching",
     "description": "Use n1-highmem-32 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highmem-32"
     ],
@@ -192,6 +254,8 @@
   {
     "_id": "compShapes_n1-highcpu-8_fetching",
     "description": "Use n1-highcpu-8 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highcpu-8"
     ],
@@ -202,6 +266,8 @@
   {
     "_id": "compShapes_n1-highcpu-2_fetching",
     "description": "Use n1-highcpu-2 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highcpu-2"
     ],
@@ -212,6 +278,8 @@
   {
     "_id": "compShapes_n1-highcpu-32_fetching",
     "description": "Use n1-highcpu-32 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highcpu-32"
     ],
@@ -222,6 +290,8 @@
   {
     "_id": "nbBillingReportFull",
     "description": "View full billing report for all users",
+    "type": "BILLING",
+    "cloud": "GCP",
     "pages": [
       "/api/infrastructure_provision/billing"
     ],
@@ -230,11 +300,12 @@
     ]
   },
   {
-    "_id": "admin",
-    "description": "Allow to execute administration operation",
+    "_id": "projectAdmin",
+    "description": "Allow to execute administration operation per project",
+    "type": "ADMINISTRATION",
+    "cloud": "GCP",
     "pages": [
       "environment/*",
-      "/api/infrastructure/backup",
       "/roleManagement",
       "/api/settings",
       "/user/settings",
@@ -244,5 +315,26 @@
     "groups": [
       "$anyuser"
     ]
+  },
+  {
+    "_id": "admin",
+    "description": "Allow to execute administration operation",
+    "type": "ADMINISTRATION",
+    "cloud": "GCP",
+    "pages": [
+      "environment/*",
+      "/api/infrastructure/backup",
+      "/roleManagement",
+      "/roleManagement/create",
+      "/roleManagement/delete",
+      "/api/settings",
+      "/user/settings",
+      "/api/project",
+      "/api/project/create",
+      "/api/endpoint"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
   }
 ]
diff --git a/services/self-service/src/main/resources/webapp/angular.json b/services/self-service/src/main/resources/webapp/angular.json
index 95c1cc0..32b79b3 100644
--- a/services/self-service/src/main/resources/webapp/angular.json
+++ b/services/self-service/src/main/resources/webapp/angular.json
@@ -26,14 +26,18 @@
               },
               "src/styles.scss",
               "src/assets/styles/app-loading.scss",
-              "node_modules/ngx-toastr/toastr.css"
+              "node_modules/ngx-toastr/toastr.css",
+              "node_modules/swagger-ui-dist/swagger-ui.css"
             ],
             "stylePreprocessorOptions": {
               "includePaths": [
                 "src/assets/styles"
               ]
             },
-            "scripts": []
+            "scripts": [
+              "node_modules/swagger-ui-dist/swagger-ui-bundle.js",
+              "node_modules/swagger-ui-dist/swagger-ui-standalone-preset.js"
+            ]
           },
           "configurations": {
             "production": {
diff --git a/services/self-service/src/main/resources/webapp/browserslist b/services/self-service/src/main/resources/webapp/browserslist
index 6382d82..ae0116f 100644
--- a/services/self-service/src/main/resources/webapp/browserslist
+++ b/services/self-service/src/main/resources/webapp/browserslist
@@ -1,21 +1,21 @@
 # *****************************************************************************
 #
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
 #
-#   http://www.apache.org/licenses/LICENSE-2.0
+#  http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
 #
 # ******************************************************************************
 
diff --git a/services/self-service/src/main/resources/webapp/package-lock.json b/services/self-service/src/main/resources/webapp/package-lock.json
index 9b9ab8c..f8cfcc8 100644
--- a/services/self-service/src/main/resources/webapp/package-lock.json
+++ b/services/self-service/src/main/resources/webapp/package-lock.json
@@ -4945,7 +4945,7 @@
       "dependencies": {
         "abbrev": {
           "version": "1.1.1",
-          "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+          "resolved": false,
           "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
           "dev": true,
           "optional": true
@@ -4959,14 +4959,14 @@
         },
         "aproba": {
           "version": "1.2.0",
-          "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz",
+          "resolved": false,
           "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==",
           "dev": true,
           "optional": true
         },
         "are-we-there-yet": {
           "version": "1.1.5",
-          "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz",
+          "resolved": false,
           "integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==",
           "dev": true,
           "optional": true,
@@ -4977,14 +4977,14 @@
         },
         "balanced-match": {
           "version": "1.0.0",
-          "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
+          "resolved": false,
           "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
           "dev": true,
           "optional": true
         },
         "brace-expansion": {
           "version": "1.1.11",
-          "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+          "resolved": false,
           "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
           "dev": true,
           "optional": true,
@@ -4995,7 +4995,7 @@
         },
         "chownr": {
           "version": "1.1.1",
-          "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.1.tgz",
+          "resolved": false,
           "integrity": "sha512-j38EvO5+LHX84jlo6h4UzmOwi0UgW61WRyPtJz4qaadK5eY3BTS5TY/S1Stc3Uk2lIM6TPevAlULiEJwie860g==",
           "dev": true,
           "optional": true
@@ -5030,7 +5030,7 @@
         },
         "debug": {
           "version": "4.1.1",
-          "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+          "resolved": false,
           "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
           "dev": true,
           "optional": true,
@@ -5040,7 +5040,7 @@
         },
         "deep-extend": {
           "version": "0.6.0",
-          "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+          "resolved": false,
           "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
           "dev": true,
           "optional": true
@@ -5054,14 +5054,14 @@
         },
         "detect-libc": {
           "version": "1.0.3",
-          "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz",
+          "resolved": false,
           "integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=",
           "dev": true,
           "optional": true
         },
         "fs-minipass": {
           "version": "1.2.5",
-          "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.5.tgz",
+          "resolved": false,
           "integrity": "sha512-JhBl0skXjUPCFH7x6x61gQxrKyXsxB5gcgePLZCwfyCGGsTISMoIeObbrvVeP6Xmyaudw4TT43qV2Gz+iyd2oQ==",
           "dev": true,
           "optional": true,
@@ -5095,7 +5095,7 @@
         },
         "glob": {
           "version": "7.1.3",
-          "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz",
+          "resolved": false,
           "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==",
           "dev": true,
           "optional": true,
@@ -5117,7 +5117,7 @@
         },
         "iconv-lite": {
           "version": "0.4.24",
-          "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+          "resolved": false,
           "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
           "dev": true,
           "optional": true,
@@ -5127,7 +5127,7 @@
         },
         "ignore-walk": {
           "version": "3.0.1",
-          "resolved": "https://registry.npmjs.org/ignore-walk/-/ignore-walk-3.0.1.tgz",
+          "resolved": false,
           "integrity": "sha512-DTVlMx3IYPe0/JJcYP7Gxg7ttZZu3IInhuEhbchuqneY9wWe5Ojy2mXLBaQFUQmo0AW2r3qG7m1mg86js+gnlQ==",
           "dev": true,
           "optional": true,
@@ -5155,7 +5155,7 @@
         },
         "ini": {
           "version": "1.3.5",
-          "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz",
+          "resolved": false,
           "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==",
           "dev": true,
           "optional": true
@@ -5196,7 +5196,7 @@
         },
         "minipass": {
           "version": "2.3.5",
-          "resolved": "https://registry.npmjs.org/minipass/-/minipass-2.3.5.tgz",
+          "resolved": false,
           "integrity": "sha512-Gi1W4k059gyRbyVUZQ4mEqLm0YIUiGYfvxhF6SIlk3ui1WVxMTGfGdQ2SInh3PDrRTVvPKgULkpJtT4RH10+VA==",
           "dev": true,
           "optional": true,
@@ -5207,7 +5207,7 @@
         },
         "minizlib": {
           "version": "1.2.1",
-          "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.2.1.tgz",
+          "resolved": false,
           "integrity": "sha512-7+4oTUOWKg7AuL3vloEWekXY2/D20cevzsrNT2kGWm+39J9hGTCBv8VI5Pm5lXZ/o3/mdR4f8rflAPhnQb8mPA==",
           "dev": true,
           "optional": true,
@@ -5227,14 +5227,14 @@
         },
         "ms": {
           "version": "2.1.1",
-          "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
+          "resolved": false,
           "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==",
           "dev": true,
           "optional": true
         },
         "needle": {
           "version": "2.3.0",
-          "resolved": "https://registry.npmjs.org/needle/-/needle-2.3.0.tgz",
+          "resolved": false,
           "integrity": "sha512-QBZu7aAFR0522EyaXZM0FZ9GLpq6lvQ3uq8gteiDUp7wKdy0lSd2hPlgFwVuW1CBkfEs9PfDQsQzZghLs/psdg==",
           "dev": true,
           "optional": true,
@@ -5246,7 +5246,7 @@
         },
         "node-pre-gyp": {
           "version": "0.12.0",
-          "resolved": "https://registry.npmjs.org/node-pre-gyp/-/node-pre-gyp-0.12.0.tgz",
+          "resolved": false,
           "integrity": "sha512-4KghwV8vH5k+g2ylT+sLTjy5wmUOb9vPhnM8NHvRf9dHmnW/CndrFXy2aRPaPST6dugXSdHXfeaHQm77PIz/1A==",
           "dev": true,
           "optional": true,
@@ -5276,14 +5276,14 @@
         },
         "npm-bundled": {
           "version": "1.0.6",
-          "resolved": "https://registry.npmjs.org/npm-bundled/-/npm-bundled-1.0.6.tgz",
+          "resolved": false,
           "integrity": "sha512-8/JCaftHwbd//k6y2rEWp6k1wxVfpFzB6t1p825+cUb7Ym2XQfhwIC5KwhrvzZRJu+LtDE585zVaS32+CGtf0g==",
           "dev": true,
           "optional": true
         },
         "npm-packlist": {
           "version": "1.4.1",
-          "resolved": "https://registry.npmjs.org/npm-packlist/-/npm-packlist-1.4.1.tgz",
+          "resolved": false,
           "integrity": "sha512-+TcdO7HJJ8peiiYhvPxsEDhF3PJFGUGRcFsGve3vxvxdcpO2Z4Z7rkosRM0kWj6LfbK/P0gu3dzk5RU1ffvFcw==",
           "dev": true,
           "optional": true,
@@ -5294,7 +5294,7 @@
         },
         "npmlog": {
           "version": "4.1.2",
-          "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz",
+          "resolved": false,
           "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==",
           "dev": true,
           "optional": true,
@@ -5345,7 +5345,7 @@
         },
         "osenv": {
           "version": "0.1.5",
-          "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz",
+          "resolved": false,
           "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==",
           "dev": true,
           "optional": true,
@@ -5363,14 +5363,14 @@
         },
         "process-nextick-args": {
           "version": "2.0.0",
-          "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz",
+          "resolved": false,
           "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==",
           "dev": true,
           "optional": true
         },
         "rc": {
           "version": "1.2.8",
-          "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
+          "resolved": false,
           "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
           "dev": true,
           "optional": true,
@@ -5392,7 +5392,7 @@
         },
         "readable-stream": {
           "version": "2.3.6",
-          "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
+          "resolved": false,
           "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
           "dev": true,
           "optional": true,
@@ -5408,7 +5408,7 @@
         },
         "rimraf": {
           "version": "2.6.3",
-          "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz",
+          "resolved": false,
           "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==",
           "dev": true,
           "optional": true,
@@ -5418,28 +5418,28 @@
         },
         "safe-buffer": {
           "version": "5.1.2",
-          "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+          "resolved": false,
           "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
           "dev": true,
           "optional": true
         },
         "safer-buffer": {
           "version": "2.1.2",
-          "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+          "resolved": false,
           "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
           "dev": true,
           "optional": true
         },
         "sax": {
           "version": "1.2.4",
-          "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
+          "resolved": false,
           "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==",
           "dev": true,
           "optional": true
         },
         "semver": {
           "version": "5.7.0",
-          "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.0.tgz",
+          "resolved": false,
           "integrity": "sha512-Ya52jSX2u7QKghxeoFGpLwCtGlt7j0oY9DYb5apt9nPlJ42ID+ulTXESnt/qAQcoSERyZ5sl3LDIOw0nAn/5DA==",
           "dev": true,
           "optional": true
@@ -5472,7 +5472,7 @@
         },
         "string_decoder": {
           "version": "1.1.1",
-          "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+          "resolved": false,
           "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
           "dev": true,
           "optional": true,
@@ -5499,7 +5499,7 @@
         },
         "tar": {
           "version": "4.4.8",
-          "resolved": "https://registry.npmjs.org/tar/-/tar-4.4.8.tgz",
+          "resolved": false,
           "integrity": "sha512-LzHF64s5chPQQS0IYBn9IN5h3i98c12bo4NCO7e0sGM2llXQ3p2FGC5sdENN4cTW48O915Sh+x+EXx7XW96xYQ==",
           "dev": true,
           "optional": true,
@@ -5522,7 +5522,7 @@
         },
         "wide-align": {
           "version": "1.1.3",
-          "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz",
+          "resolved": false,
           "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==",
           "dev": true,
           "optional": true,
@@ -5539,7 +5539,7 @@
         },
         "yallist": {
           "version": "3.0.3",
-          "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.3.tgz",
+          "resolved": false,
           "integrity": "sha512-S+Zk8DEWE6oKpV+vI3qWkaK+jSbIK86pCwe2IF/xwIpQ8jEuxpw9NyaGjmp9+BoJv5FV2piqCDcoCtStppiq2A==",
           "dev": true,
           "optional": true
@@ -9720,6 +9720,11 @@
         "has-flag": "^3.0.0"
       }
     },
+    "swagger-ui-dist": {
+      "version": "3.24.3",
+      "resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-3.24.3.tgz",
+      "integrity": "sha512-kB8qobP42Xazaym7sD9g5mZuRL4416VIIYZMqPEIskkzKqbPLQGEiHA3ga31bdzyzFLgr6Z797+6X1Am6zYpbg=="
+    },
     "symbol-observable": {
       "version": "1.2.0",
       "resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-1.2.0.tgz",
diff --git a/services/self-service/src/main/resources/webapp/package.json b/services/self-service/src/main/resources/webapp/package.json
index 70b98ea..1284d50 100644
--- a/services/self-service/src/main/resources/webapp/package.json
+++ b/services/self-service/src/main/resources/webapp/package.json
@@ -29,16 +29,17 @@
     "@angular/router": "^8.2.6",
     "core-js": "3.2.1",
     "guacamole-common-js": "^1.1.0",
+    "hammerjs": "^2.0.8",
     "moment": "^2.24.0",
     "moment-timezone": "^0.5.26",
     "ng-daterangepicker": "^1.1.0",
     "ngx-toastr": "^10.2.0",
     "rxjs": "6.5.3",
     "rxjs-compat": "6.5.3",
+    "swagger-ui-dist": "^3.24.3",
     "tslib": "^1.10.0",
     "web-animations-js": "^2.3.2",
-    "zone.js": "~0.9.1",
-    "hammerjs": "^2.0.8"
+    "zone.js": "~0.9.1"
   },
   "devDependencies": {
     "@angular-devkit/build-angular": "~0.803.5",
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.html
index 735bf16..08b030f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.html
@@ -26,7 +26,7 @@
   <div class="dialog-content tabs">
     <div class="content-box">
       <mat-tab-group #tabGroup>
-        <mat-tab label="Connect ENDPOINT">
+        <mat-tab label="CONNECT ENDPOINT">
           <div class="split">
             <form [formGroup]="createEndpointForm" novalidate>
               <div class="control-group">
@@ -35,19 +35,24 @@
                   <input type="text" formControlName="name" placeholder="Enter endpoint name"
                     (blur)="generateEndpointTag($event)">
                   <span class="error"
-                    *ngIf="!createEndpointForm?.controls.name.valid && createEndpointForm?.controls.name.touched">
+                    *ngIf="!createEndpointForm?.controls.name.valid && createEndpointForm?.controls.name.touched && !createEndpointForm?.controls['name'].hasError('isDuplicate')">
                     Endpoint name can only contain letters, numbers, hyphens and '_' but can not end with special
                     characters
                   </span>
+                  <span class="error"
+                        *ngIf="createEndpointForm?.controls['name'].hasError('isDuplicate')">
+                    This endpoint name already exists.
+                  </span>
                 </div>
               </div>
               <div class="control-group">
                 <label class="label">Endpoint url</label>
                 <div class="control">
                   <input type="text" formControlName="url" placeholder="Enter endpoint url">
+                  <span class="error" *ngIf="createEndpointForm?.controls['url'].hasError('isDuplicate')">This endpoint url already exists.</span>
                   <span class="error"
-                    *ngIf="!createEndpointForm?.controls.url.valid && createEndpointForm.controls.url.touched">
-                    Endpoint url should end with slash
+                        *ngIf="!createEndpointForm?.controls.url.valid && createEndpointForm.controls.url.touched && !createEndpointForm?.controls['url'].hasError('isDuplicate')">
+                    Please provide a valid endpoint url ending with a slash.
                   </span>
                 </div>
               </div>
@@ -58,7 +63,7 @@
                   <span class="error"
                     *ngIf="!createEndpointForm?.controls.account.valid && createEndpointForm.controls.account.touched">
                     Endpoint account can only contain letters, numbers, hyphens and '_' but can not end with special
-                    characters
+                    characters.
                   </span>
                 </div>
               </div>
@@ -76,9 +81,19 @@
               </div>
             </form>
             <div class="action-group m-bott-10">
-              <button mat-raised-button type="button" class="butt action" (click)="dialogRef.close()">Cancel</button>
-              <button mat-raised-button type="button" [disabled]="!createEndpointForm.valid"
-                (click)="assignChanges(createEndpointForm.value)" class="butt butt-success action">Connect</button>
+              <button mat-raised-button
+                      type="button"
+                      [disabled]="!createEndpointForm.value.url || !createEndpointForm.valid"
+                      class="butt action"
+                      (click)="getEndpoinConnectionStatus(createEndpointForm.value.url)"
+              >
+                Test
+              </button>
+              <div class="action-butt">
+                <button mat-raised-button type="button" class="butt action" (click)="dialogRef.close()">Cancel</button>
+                <button mat-raised-button type="button" [disabled]="!createEndpointForm.valid"
+                  (click)="assignChanges(createEndpointForm.value)" class="butt butt-success action">Connect</button>
+              </div>
             </div>
           </div>
         </mat-tab>
@@ -92,7 +107,11 @@
 
               <ng-container matColumnDef="url">
                 <th mat-header-cell *matHeaderCellDef class="url"> Url </th>
-                <td mat-cell *matCellDef="let element"> {{element.url}} </td>
+                <td mat-cell
+                    *matCellDef="let element"
+                >
+                  <span matTooltip="{{element.url}}" matTooltipPosition="above">{{element.url}}</span>
+                </td>
               </ng-container>
 
               <ng-container matColumnDef="account">
@@ -108,7 +127,10 @@
               <ng-container matColumnDef="actions">
                 <th mat-header-cell *matHeaderCellDef class="actions"></th>
                 <td mat-cell *matCellDef="let element" class="actions">
-                  <span (click)="deleteEndpoint(element)">
+                  <span (click)="getEndpoinConnectionStatus(element.url)" matTooltip="Test" matTooltipPosition="above">
+                    <mat-icon>compare_arrows</mat-icon>
+                  </span>
+                  <span (click)="deleteEndpoint(element)" matTooltip="Disconnect" matTooltipPosition="above">
                     <mat-icon>delete_forever</mat-icon>
                   </span>
                 </td>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.scss
index 3b5418e..a23554f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.scss
@@ -16,72 +16,84 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+.endpoints-dialog {
+  .content-box {
+    padding: 10px 30px 30px;
+    height: 400px;
 
-.content-box {
-  padding: 10px 30px 30px;
-  height: 400px;
+    .split {
+      display: flex;
+      flex-direction: column;
+      justify-content: space-between;
+      height: 100%;
 
-  .split {
-    display: flex;
-    flex-direction: column;
-    justify-content: space-between;
-    height: 100%;
+      form {
+        padding: 20px 10px;
 
-    form {
-      padding: 20px 10px;
-
-      .control-group {
-        .error {
-          position: absolute;
-          right: 0;
-          bottom: 5px;
-          font-family: 'Open Sans', sans-serif;
-          font-weight: 300;
-        }
-      }
-    }
-  }
-
-  .action-group {
-    text-align: center;
-  }
-
-  .endpoints {
-    height: 265px;
-
-    table {
-      width: 100%;
-
-      tr {
-        td {
-          vertical-align: middle;
-        }
-      }
-
-      .actions {
-        color: #607d8b;
-        width: 10%;
-        text-align: center;
-
-        span {
-          transition: all .5s ease-in-out;
-          cursor: pointer;
-
-          .mat-icon {
-            font-size: 18px;
-            padding-top: 5px;
-          }
-
-          &:hover {
-            color: darken(#607d8b, 10%);
+        .control-group {
+          .error {
+            position: absolute;
+            right: 0;
+            bottom: 5px;
+            font-family: 'Open Sans', sans-serif;
+            font-weight: 300;
+            top: 34px;
           }
         }
       }
     }
 
-    .content {
-      p {
-        margin-bottom: 30px;
+    .action-group {
+      display: flex;
+      justify-content: space-between;
+      padding: 0 10px;
+    }
+
+    .endpoints {
+      height: 265px;
+
+      table.mat-table {
+        width: 100%;
+        thead {
+          background: transparent;
+          .mat-header-row {
+            background-clip: padding-box;
+          }
+        }
+        tr {
+          td {
+            vertical-align: middle;
+            max-width: 150px;
+            overflow: hidden;
+            text-overflow: ellipsis;
+          }
+        }
+
+        .actions {
+          color: #607d8b;
+          text-align: center;
+          width: 14%;
+
+          span {
+            transition: all .5s ease-in-out;
+            cursor: pointer;
+
+            .mat-icon {
+              font-size: 18px;
+              padding-top: 5px;
+            }
+
+            &:hover {
+              color: darken(#607d8b, 10%);
+            }
+          }
+        }
+      }
+
+      .content {
+        p {
+          margin-bottom: 30px;
+        }
       }
     }
   }
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts
index 23eab1c..45c6a23 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts
@@ -25,6 +25,7 @@
 import { EndpointService } from '../../../core/services';
 import { NotificationDialogComponent } from '../../../shared/modal-dialog/notification-dialog';
 import { PATTERNS } from '../../../core/util';
+import { map } from 'rxjs/operators';
 
 export interface Endpoint {
   name: string;
@@ -48,7 +49,7 @@
     public dialogRef: MatDialogRef<EndpointsComponent>,
     public dialog: MatDialog,
     private endpointService: EndpointService,
-    private _fb: FormBuilder
+    private _fb: FormBuilder,
   ) { }
 
   ngOnInit() {
@@ -62,31 +63,168 @@
 
   public assignChanges(data) {
     this.endpointService.createEndpoint(data).subscribe(() => {
-      this.toastr.success('Endpoint created successfully!', 'Success!');
+      this.toastr.success('Endpoint connected successfully!', 'Success!');
       this.dialogRef.close(true);
-    }, error => this.toastr.error(error.message || 'Endpoint creation failed!', 'Oops!'));
+    }, error => this.toastr.error(error.message || 'Endpoint connection failed!', 'Oops!'));
   }
 
-  public deleteEndpoint(data) {
-    this.dialog.open(NotificationDialogComponent, { data: { type: 'confirmation', item: data }, panelClass: 'modal-sm' })
-      .afterClosed().subscribe(result => {
-        result && this.endpointService.deleteEndpoint(data.name).subscribe(() => {
-          this.toastr.success('Endpoint successfully deleted!', 'Success!');
-          this.getEndpointList();
-        }, error => this.toastr.error(error.message || 'Endpoint creation failed!', 'Oops!'));
-      });
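+  // Gathers the endpoint's projects and their notebooks, filters out projects
+  // whose endpoint node is already TERMINATED/TERMINATING/FAILED, then asks the
+  // user whether the remaining resources should be terminated on disconnect.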
+  public deleteEndpoint(data): void {
+    this.endpointService.getEndpointsResource(data.name)
+      .pipe(map(resource =>
+        resource.projects.map(project =>
+          EndpointsComponent.createResourceList(
+            project.name,
+            resource.exploratories.filter(notebook => notebook.project === project.name),
+            project.endpoints.filter(endpoint => endpoint.name === data.name)[0].status))
+          .filter(project => project.nodeStatus !== 'TERMINATED'
+            && project.nodeStatus !== 'TERMINATING'
+            && project.nodeStatus !== 'FAILED'
+          )))
+      .subscribe((resource: any) => {
+        this.dialog.open(NotificationDialogComponent, {
+          data: { type: 'confirmation', item: data, list: resource },
+          panelClass: 'modal-sm'
+        }).afterClosed().subscribe(result => {
+          result === 'noTerminate' && this.deleteEndpointOption(data, false);
+          result === 'terminate' && this.deleteEndpointOption(data, true);
+        });
+      });
+  }
+
+  public getEndpoinConnectionStatus(url) {
+    const getStatus = this.endpointService.getEndpoinConnectionStatus(encodeURIComponent(url));
+    this.dialog.open(EndpointTestResultDialogComponent, { data: {url: url, getStatus}, panelClass: 'modal-sm' });
+  }
+
+  private static createResourceList(name: string, resource: Array<any>, nodeStatus: string): Object {
+    return {name, resource, nodeStatus};
   }
 
   private initFormModel(): void {
     this.createEndpointForm = this._fb.group({
-      name: ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.namePattern)])],
-      url: ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.url)])],
+      name: ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.namePattern), this.validateName.bind(this)])],
+      url: ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.fullUrl), this.validateUrl.bind(this)])],
       account: ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.namePattern)])],
       endpoint_tag: ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.namePattern)])]
     });
   }
 
-  private getEndpointList() {
+  private deleteEndpointOption(data, option): void {
+    this.endpointService.deleteEndpoint(`${data.name}?with-resources=${option}`).subscribe(() => {
+      this.toastr.success(option ? 'Endpoint successfully disconnected. All related resources are terminating!' : 'Endpoint successfully disconnected!' , 'Success!');
+      this.getEndpointList();
+    }, error => this.toastr.error(error.message || 'Endpoint disconnection failed!', 'Oops!'));
+  }
+
+  private getEndpointList(): void {
     this.endpointService.getEndpointsData().subscribe((endpoints: any) => this.endpoints = endpoints);
   }
+
+  private validateUrl(control) {
+    if (control && control.value) {
+      const isDuplicate = this.endpoints.some(endpoint => endpoint['url'].toLowerCase() === control.value.toLowerCase());
+      return isDuplicate ? { isDuplicate: true } : null;
+    }
+    return null;
+  }
+
+  private validateName(control) {
+    if (control && control.value) {
+      const isDuplicate = this.endpoints.some(endpoint => endpoint['name'].toLowerCase() === control.value.toLowerCase());
+      return isDuplicate ? { isDuplicate: true } : null;
+    }
+    return null;
+  }
+}
+
+@Component({
+  selector: 'endpoint-test-result-dialog',
+  template: `
+    <div id="dialog-box">
+      <div class="dialog-header">
+        <h4 class="modal-title">Endpoint test</h4>
+        <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
+      </div>
+      <div class="progress-bar" >
+        <mat-progress-bar *ngIf="!response" mode="indeterminate"></mat-progress-bar>
+      </div>
+      <div class="content-box">
+      <div mat-dialog-content class="content message">
+        <p
+          class="dialog-message ellipsis"
+          *ngIf="!response">
+          Connecting to url
+          <span class="strong"
+                matTooltip="{{data.url}}"
+                [matTooltipPosition]="'above'"
+          >
+            {{cutToLongUrl(data.url)}}
+          </span>
+        </p>
+        <p
+          class="dialog-message ellipsis"
+          *ngIf="isConnected && response">
+          <i class="material-icons icons-possition active">check_circle</i>
+          Connected to url
+          <span matTooltip="{{data.url}}"
+                [matTooltipPosition]="'above'"
+                class="strong"
+          >
+            {{cutToLongUrl(data.url)}}
+          </span>
+        </p>
+        <p class="dialog-message ellipsis"
+           *ngIf="!isConnected && response"
+        >
+          <i class="material-icons icons-possition failed">cancel</i>
+          Failed to connect to url
+          <span matTooltip="{{data.url}}"
+                [matTooltipPosition]="'above'"
+                class="strong"
+          >
+            {{cutToLongUrl(data.url)}}
+          </span>
+        </p>
+      </div>
+      <div class="text-center m-top-20 m-bott-10">
+        <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">Close</button>
+      </div>
+      </div>
+    </div>
+  `,
+  styles: [
+    `#dialog-box {overflow: hidden}
+    .icons-position { line-height: 25px; vertical-align: middle; padding-right: 7px; }
+    .content { color: #718ba6; padding: 15px 50px; font-size: 14px; font-weight: 400; margin: 0; }
+    .info .confirm-dialog { color: #607D8B; }
+    header { display: flex; justify-content: space-between; color: #607D8B; }
+    header h4 i { vertical-align: bottom; }
+    header a i { font-size: 20px; }
+    header a:hover i { color: #35afd5; cursor: pointer; }
+    label { font-size: 15px; font-weight: 500; font-family: "Open Sans",sans-serif; cursor: pointer; display: flex; align-items: center;}
+    .progress-bar{ height: 4px;}
+    .dialog-message{min-height: 25px; overflow: hidden;}
+    `
+  ]
+})
+export class EndpointTestResultDialogComponent {
+  public isConnected = false;
+  public response = false;
+  constructor(
+    public dialogRef: MatDialogRef<EndpointTestResultDialogComponent>,
+    @Inject(MAT_DIALOG_DATA) public data: any
+  ) {
+    this.data.getStatus.subscribe(
+      () => {
+        this.isConnected = true;
+        this.response = true;
+      },
+      () => {
+        this.isConnected = false;
+        this.response = true;
+      });
+  }
+  public cutTooLongUrl(url: string): string {
+    return url.length > 25 ? url.slice(0, 25) + '...' : url;
+  }
+
 }
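Note on the two duplicate-checking validators above: validateName and validateUrl repeat the same lookup and depend on bind(this). A reusable validator factory is one way to express the check once; this is a minimal sketch assuming the endpoint list stays reachable from the form's scope (duplicateIn and getValues are illustrative names, not part of the component):

    import { AbstractControl, ValidationErrors, ValidatorFn } from '@angular/forms';

    // Flags a control value that already exists in a dynamically loaded list.
    // getValues is re-evaluated on every validation pass, so a list that
    // arrives after form creation is still taken into account.
    export function duplicateIn(getValues: () => string[]): ValidatorFn {
      return (control: AbstractControl): ValidationErrors | null => {
        if (!control.value) { return null; }
        const needle = String(control.value).toLowerCase();
        return getValues().some(v => v.toLowerCase() === needle) ? { isDuplicate: true } : null;
      };
    }

    // Possible usage inside initFormModel():
    //   name: ['', [Validators.required, duplicateIn(() => this.endpoints.map(e => e.name))]]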
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/index.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/index.ts
index bb6e960..28c47a6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/index.ts
@@ -36,7 +36,7 @@
 import { DirectivesModule } from '../../core/directives';
 
 import { SsnMonitorComponent } from './ssn-monitor/ssn-monitor.component';
-import { EndpointsComponent } from './endpoints/endpoints.component';
+import { EndpointsComponent, EndpointTestResultDialogComponent } from './endpoints/endpoints.component';
 import { ProjectModule } from '../project';
 
 export * from './management.component';
@@ -62,7 +62,8 @@
     ReconfirmationDialogComponent,
     ConfirmActionDialogComponent,
     SsnMonitorComponent,
-    EndpointsComponent
+    EndpointsComponent,
+    EndpointTestResultDialogComponent
   ],
   entryComponents: [
     ReconfirmationDialogComponent,
@@ -70,7 +71,9 @@
     BackupDilogComponent,
     SsnMonitorComponent,
     EndpointsComponent,
-    ManageEnvironmentComponent],
+    ManageEnvironmentComponent,
+    EndpointTestResultDialogComponent
+  ],
   providers: [EnvironmentsDataService],
   exports: [ManagementComponent]
 })
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
index 4e2ce1d..74ff5af 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
@@ -19,7 +19,7 @@
 
 <div id="dialog-box" class="manage-env-dialog">
   <header class="dialog-header">
-    <h4 class="modal-title">Manage environment</h4>
+    <h4 class="modal-title">Manage DLab quotas</h4>
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </header>
   <div class="dialog-content">
@@ -30,43 +30,70 @@
             <mat-list-item class="list-header">
               <div class="username">Project</div>
               <div class="quotes">Limit</div>
-              <div class="action">Actions</div>
+<!--              <div class="action">Actions</div>-->
             </mat-list-item>
             <div class="scrolling-content" id="scrolling" formArrayName="projects">
               <mat-list-item *ngFor="let item of usersEnvironments.controls; let i=index" [formGroupName]="i"
                 class="list-item">
-                <div class="username ellipsis"
-                  matTooltip="{{ manageUsersForm.controls['projects'].controls[i].value['project'] }}"
-                  matTooltipPosition="above">{{ manageUsersForm.controls['projects'].controls[i].value['project'] }}
+                <div class="username ellipsis">
+                  <span class="ellipsis"
+                  matTooltip="{{ manageUsersForm.controls['projects']['controls'][i].value['project'] }}"
+                  matTooltipPosition="above">{{ manageUsersForm.controls['projects']['controls'][i].value['project'] }}
+                    </span>
                 </div>
                 <div class="quotes">
-                  <input type="number" (keypress)="CheckUtils.isNumberKey($event)" min="0"
+                  <input type="number" (keypress)="CheckUtils.numberOnly($event)" min="0"
                     placeholder="Enter limit, in USD" formControlName="budget">
                   <span class="error"
-                    *ngIf="!manageUsersForm?.controls['projects'].controls[i].controls['budget'].valid && !manageUsersForm?.controls['projects'].controls[i].controls['budget'].hasError('overrun')">Only
-                    positive integers are allowed</span>
-                  <span class="error"
-                    *ngIf="manageUsersForm?.controls['projects'].controls[i].controls['budget'].hasError('overrun')">Per-user
+                    *ngIf="manageUsersForm?.controls['projects']['controls'][i].controls['budget'].hasError('overrun')">Per-user
                     quotes cannot be greater than total budget</span>
                 </div>
-                <div class="action">
-                  <span matTooltip="Stop" matTooltipPosition="above" (click)="applyAction('stop', item)"><i
-                      class="material-icons">pause_circle_outline</i>
-                  </span>
-                  <span matTooltip="Terminate" matTooltipPosition="above" (click)="applyAction('terminate', item)">
-                    <i class="material-icons">phonelink_off</i>
-                  </span>
-                </div>
+<!--                <div class="action">-->
+<!--                  <span-->
+<!--                    *ngIf="manageUsersForm?.controls['projects']['controls'][i].controls['canBeStopped'].value; else not_allowed_stop"-->
+<!--                    matTooltip="Stop" matTooltipPosition="above" (click)="applyAction('stop', item)">-->
+<!--                    <i class="material-icons">pause_circle_outline</i>-->
+<!--                  </span>-->
+<!--                  <ng-template #not_allowed_stop>-->
+<!--                    <span matTooltip="Unable to stop project because all resources are already stopped'"-->
+<!--                      matTooltipPosition="above" class="not-active">-->
+<!--                      <i class="material-icons">pause_circle_outline</i>-->
+<!--                    </span>-->
+<!--                  </ng-template>-->
+
+<!--                  <span-->
+<!--                    *ngIf="manageUsersForm?.controls['projects']['controls'][i].controls['canBeTerminated'].value; else not_allowed_terminate"-->
+<!--                    matTooltip="Terminate" matTooltipPosition="above" (click)="applyAction('terminate', item)">-->
+<!--                    <i class="material-icons">phonelink_off</i>-->
+<!--                  </span>-->
+<!--                  <ng-template #not_allowed_terminate>-->
+<!--                    <span matTooltip="Unable to terminate project because all resources are already terminated"-->
+<!--                      matTooltipPosition="above" class="not-active">-->
+<!--                      <i class="material-icons">phonelink_off</i>-->
+<!--                    </span>-->
+<!--                  </ng-template>-->
+
+<!--                </div>-->
               </mat-list-item>
             </div>
             <div class="control-group total-budget">
-              <label class="label">Total budget</label>
-              <div class="control">
-                <input type="number" (keypress)="CheckUtils.isNumberKey($event)" formControlName="total"
-                  placeholder="Enter total budget, in USD">
-                <span class="error" *ngIf="manageUsersForm?.controls['total'].hasError('overrun')">Total budget
-                  cannot be lower than a sum of users quotes</span>
-              </div>
+
+              <mat-list-item class="list-item">
+                <div class="username ellipsis">
+                  <span class="ellipsis">Total budget</span>
+                </div>
+                <div class="quotes">
+                  <input type="number" (keypress)="CheckUtils.numberOnly($event)" formControlName="total"
+                         placeholder="Enter total budget, in USD">
+                  <span class="error" *ngIf="manageUsersForm?.controls['total'].hasError('overrun')">Total budget cannot be lower than a sum of users quotes</span>
+                </div>
+<!--              <label class="username">Total budget</label>-->
+<!--              <div class="quotes">-->
+<!--                <input type="number" (keypress)="CheckUtils.numberOnly($event)" formControlName="total"-->
+<!--                  placeholder="Enter total budget, in USD">-->
+<!--                <span class="error" *ngIf="manageUsersForm?.controls['total'].hasError('overrun')">Total budget cannot be lower than a sum of users quotes</span>-->
+<!--              </div>-->
+              </mat-list-item>
             </div>
             <div class="text-center m-top-30">
               <button mat-raised-button type="button" (click)="dialogRef.close()" class="butt action">Cancel</button>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.scss
index 9cae733..0245506 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.scss
@@ -20,7 +20,7 @@
 .manage-env-dialog {
   .mat-list {
     .mat-list-item {
-      height: 60px;
+      height: 63px;
       margin: 5px 0;
       position: relative;
 
@@ -43,41 +43,15 @@
   }
 
   .quotes {
-    width: 40%;
+    width: 55%;
     margin-right: 10px;
     position: relative;
 
     .error {
       position: absolute;
       left: 0;
-      bottom: -14px;
-    }
-  }
-
-  .action {
-    width: 15%;
-
-    span {
-      padding: 3px;
-      cursor: pointer;
-
-      &:hover {
-        color: #35afd5;
-      }
-
-      i {
-        font-size: 20px;
-      }
-    }
-
-    .disabled {
-      cursor: not-allowed !important;
-      pointer-events: all;
-      opacity: .6;
-
-      &:hover {
-        color: #6b8299;
-      }
+      top: 34px;
+      font-family: 'Open Sans', sans-serif;
     }
   }
 
@@ -87,17 +61,18 @@
 
     .control {
       position: relative;
-      width: 56%;
+      width: 45%;
+      margin-right: 10px;
 
       .error {
         position: absolute;
         left: 0;
-        bottom: -14px;
+        bottom: -15px;
       }
     }
 
     .label {
-      width: 44%;
+      width: 55%;
     }
   }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.ts
index 60561ca..f99944f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.ts
@@ -48,8 +48,20 @@
   ngOnInit() {
     !this.manageUsersForm && this.initForm();
     this.setProjectsControl();
+    this.manageUsersForm.controls['total'].setValue(this.data.total.conf_max_budget || '');
+    this.onFormChange();
+  }
 
-    this.manageUsersForm.controls['total'].setValue(this.data.total.conf_max_budget || null);
+  public onFormChange() {
+    this.manageUsersForm.valueChanges.subscribe(() => {
+      if (this.getCurrentTotalValue() && this.getCurrentTotalValue() >= this.getCurrentUsersTotal()) {
+        this.manageUsersForm.controls['projects']['controls'].forEach(v => {
+          v.controls['budget'].setErrors(null);
+        });
+        this.manageUsersForm.controls['total'].setErrors(null);
+      }
+    });
   }
 
   get usersEnvironments(): FormArray {
@@ -57,7 +69,11 @@
   }
 
   public setBudgetLimits(value) {
-    this.dialogRef.close(value);
+    if (this.getCurrentTotalValue() >= this.getCurrentUsersTotal() || !this.getCurrentTotalValue()) {
+      this.dialogRef.close(value);
+    } else {
+      this.manageUsersForm.controls['total'].setErrors({ overrun: true });
+    }
   }
 
   public applyAction(action, project) {
@@ -72,7 +88,10 @@
   public setProjectsControl() {
     this.manageUsersForm.setControl('projects',
       this._fb.array((this.data.projectsList || []).map((x: any) => this._fb.group({
-        project: x.name, budget: [x.budget, [Validators.min(0), this.userValidityCheck.bind(this)]], status: x.status
+        project: x.name,
+        budget: [x.budget, [this.userValidityCheck.bind(this)]],
+        canBeStopped: x.canBeStopped,
+        canBeTerminated: x.canBeTerminated
       }))));
   }
 
@@ -112,11 +131,11 @@
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </div>
   <div mat-dialog-content class="content">
-    <p>Environment of <b>{{ data.project }}</b> will be
+    <p>Environment of <span class="strong">{{ data.project }}</span> will be
       <span *ngIf="data.action === 'terminate'"> terminated.</span>
       <span *ngIf="data.action === 'stop'">stopped.</span>
     </p>
-    <p class="m-top-20"><strong>Do you want to proceed?</strong></p>
+    <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
   </div>
   <div class="text-center">
     <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
index 3e04b97..9947516 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
@@ -20,7 +20,7 @@
 <div class="ani">
   <table mat-table [dataSource]="allFilteredEnvironmentData" class="data-grid management mat-elevation-z6">
     <ng-container matColumnDef="user">
-      <th mat-header-cell *matHeaderCellDef class="user">
+      <th mat-header-cell *matHeaderCellDef class="user label-header">
         <span class="label">User</span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
@@ -32,7 +32,7 @@
     </ng-container>
 
     <ng-container matColumnDef="project">
-      <th mat-header-cell *matHeaderCellDef class="project">
+      <th mat-header-cell *matHeaderCellDef class="project label-header">
         <span class="label">Project</span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
@@ -44,7 +44,7 @@
     </ng-container>
 
     <ng-container matColumnDef="type">
-      <th mat-header-cell *matHeaderCellDef class="type">
+      <th mat-header-cell *matHeaderCellDef class="type label-header">
         <span class="label">Type</span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
@@ -56,7 +56,7 @@
     </ng-container>
 
     <ng-container matColumnDef="shape">
-      <th mat-header-cell *matHeaderCellDef class="shape">
+      <th mat-header-cell *matHeaderCellDef class="shape label-header">
         <span class="label">Shape / Resource id</span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
@@ -68,7 +68,7 @@
     </ng-container>
 
     <ng-container matColumnDef="status">
-      <th mat-header-cell *matHeaderCellDef class="status">
+      <th mat-header-cell *matHeaderCellDef class="status label-header">
         <span class="label">Status</span>
 
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
@@ -77,12 +77,13 @@
             <span [hidden]="filtering && filterForm.statuses.length > 0 && !collapsedFilterRow">more_vert</span>
           </i>
         </button> </th>
-      <td mat-cell *matCellDef="let element" class="ani status" ngClass="{{element.status || ''}}">{{ element.status }}
+      <td mat-cell *matCellDef="let element" class="ani status label-header" >
+        <span ngClass="{{element.status || ''}}">{{ element.status }}</span>
       </td>
     </ng-container>
 
     <ng-container matColumnDef="resources">
-      <th mat-header-cell *matHeaderCellDef class="resources">
+      <th mat-header-cell *matHeaderCellDef class="resources label-header">
         <span class="label">Computational resources</span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
@@ -121,8 +122,10 @@
     </ng-container>
 
     <ng-container matColumnDef="actions">
-      <th mat-header-cell *matHeaderCellDef class=""></th>
-      <td mat-cell *matCellDef="let element" class=" settings">
+      <th mat-header-cell *matHeaderCellDef class="actions label-header">
+        <span class="label"> Actions </span>
+      </th>
+      <td mat-cell *matCellDef="let element" class="settings actions-col">
         <span #settings class="actions" (click)="actions.toggle($event, settings)" *ngIf="element.type !== 'edge node'"
           [ngClass]="{
             'disabled' : isActiveResources(element),
@@ -163,43 +166,43 @@
 
     <!-- FILTERING -->
     <ng-container matColumnDef="user-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'users'" [items]="filterConfiguration.users"
           [model]="filterForm.users"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="type-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <input placeholder="Filter by environment type" type="text" class="form-control filter-field"
           [value]="filterForm.type" (input)="filterForm.type = $event.target.value" />
       </th>
     </ng-container>
     <ng-container matColumnDef="project-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'projects'"
           [items]="filterConfiguration.projects" [model]="filterForm.projects"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="shape-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'shapes'"
           [items]="filterConfiguration.shapes" [model]="filterForm.shapes"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="status-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'statuses'"
           [items]="filterConfiguration.statuses" [model]="filterForm.statuses"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="resource-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'resources'"
           [items]="filterConfiguration.resources" [model]="filterForm.resources"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="actions-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef  class="actions-col filter-row-item">
         <div class="actions">
           <button mat-icon-button class="btn reset" (click)="resetFilterConfigurations()">
             <i class="material-icons">close</i>
@@ -224,7 +227,7 @@
       </td>
     </ng-container>
 
-    <tr mat-header-row *matHeaderRowDef="displayedColumns" class="header-row"></tr>
+    <tr mat-header-row *matHeaderRowDef="displayedColumns; sticky: true" class="header-row"></tr>
     <tr [hidden]="!collapsedFilterRow" mat-header-row *matHeaderRowDef="displayedFilterColumns; sticky: true"
       class="filter-row"></tr>
     <tr mat-row *matRowDef="let row; columns: displayedColumns;"></tr>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
index 30515a5..6c52559 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
@@ -23,7 +23,7 @@
     .user,
     .name,
     .project {
-      width: 14%;
+      width: 12%;
 
       .list-menu li {
         text-transform: inherit;
@@ -31,11 +31,11 @@
     }
 
     .shape {
-      width: 16% !important;
+      width: 19% !important;
     }
 
     .status {
-      width: 15% !important;
+      width: 13% !important;
     }
 
     .type {
@@ -43,20 +43,24 @@
     }
 
     .resources {
-      width: 25%;
+      width: 22%;
       padding: 5px;
     }
 
     .settings {
-      padding-right: 24px;
+      padding-right: 14px;
 
-      .actions {
-        margin-top: 2px;
+    }
+
+    .actions {
+      margin-top: 0;
+      .label {
+        padding-right: 5px;
       }
     }
-
     .actions-col {
-      width: 10%;
+      width: 6%;
     }
 
     .dashboard_table_body {
@@ -71,29 +75,36 @@
   cursor: default !important;
 }
 
-table {
+table.management {
   width: 100%;
 
+  thead {
+    background: transparent !important;
+  }
+
   td {
     padding: 5px;
   }
 
   .header-row {
+    height: auto;
     .label {
       display: inline-block;
-      padding-top: 10px;
+      padding-top: 14px;
       vertical-align: super !important;
       padding-left: 5px;
-      font-size: 11px;
+      font-size: 12px;
     }
-
-    button {
-      padding-right: 5px;
+    .actions {
+      text-align: right;
+      .label {
+        display: inline-block;
+        padding-top: 11px;
+      }
     }
   }
 
-  .actions {
-    text-align: right;
+  .filter-row {
+    background: inherit;
   }
-
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.ts
index c94ed59..d0ab9dc 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.ts
@@ -27,6 +27,7 @@
 import { ConfirmationDialogComponent } from '../../../shared/modal-dialog/confirmation-dialog';
 import { EnvironmentsDataService } from '../management-data.service';
 import { EnvironmentModel, ManagementConfigModel } from '../management.model';
+import { ProgressBarService } from '../../../core/services/progress-bar.service';
 
 export interface ManageAction {
   action: string;
@@ -66,16 +67,25 @@
     private healthStatusService: HealthStatusService,
     private environmentsDataService: EnvironmentsDataService,
     public toastr: ToastrService,
-    public dialog: MatDialog
+    public dialog: MatDialog,
+    private progressBarService: ProgressBarService,
   ) { }
 
   ngOnInit() {
+    this.getEnvironmentData();
+  }
+
+  getEnvironmentData() {
+    setTimeout(() => this.progressBarService.startProgressBar(), 0);
     this.environmentsDataService._data.subscribe(data => {
       if (data) {
         this.allEnvironmentData = EnvironmentModel.loadEnvironments(data);
         this.getDefaultFilterConfiguration(data);
         this.applyFilter(this.filterForm);
       }
+      this.progressBarService.stopProgressBar();
+    }, () => {
+      this.progressBarService.stopProgressBar();
     });
   }
 
@@ -152,11 +162,11 @@
 
       if (action === 'stop') {
         this.dialog.open(ConfirmationDialogComponent, {
-          data: { notebook: environment, type: type, manageAction: this.isAdmin }, panelClass: 'modal-md'
+          data: { notebook: environment, type: type, manageAction: true }, panelClass: 'modal-md'
         }).afterClosed().subscribe(() => this.buildGrid());
       } else if (action === 'terminate') {
         this.dialog.open(ConfirmationDialogComponent, {
-          data: { notebook: environment, type: ConfirmationDialogType.TerminateExploratory, manageAction: this.isAdmin }, panelClass: 'modal-md'
+          data: { notebook: environment, type: ConfirmationDialogType.TerminateExploratory, manageAction: true }, panelClass: 'modal-md'
         }).afterClosed().subscribe(() => this.buildGrid());
       } else if (action === 'run') {
         this.healthStatusService.runEdgeNode().subscribe(() => {
@@ -226,11 +236,11 @@
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </div>
   <div mat-dialog-content class="content">
-    <p>Resource <strong> {{ data.resource_name }}</strong> of user <strong> {{ data.user }} </strong> will be
+      <p>Resource <span class="strong"> {{ data.resource_name }}</span> of user <span class="strong"> {{ data.user }} </span> will be
       <span *ngIf="data.action === 'terminate'"> decommissioned.</span>
       <span *ngIf="data.action === 'stop'">stopped.</span>
     </p>
-    <p class="m-top-20"><strong>Do you want to proceed?</strong></p>
+    <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
   </div>
   <div class="text-center">
     <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
index 7f2d728..4c4bdae 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
@@ -27,7 +27,7 @@
         <i class="material-icons"></i>SSN Monitor
       </button> -->
       <button mat-raised-button class="butt env" (click)="openManageEnvironmentDialog()">
-        <i class="material-icons"></i>Manage environment
+        <i class="material-icons"></i>Manage DLab quotas
       </button>
       <!-- <button mat-raised-button class="butt" (click)="showBackupDialog()" [disabled]="creatingBackup">
         <i class="material-icons">backup</i>Backup
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.scss
index ee18db9..78497c6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.scss
@@ -34,4 +34,4 @@
       width: inherit;
     }
   }
-}
\ No newline at end of file
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
index 8e8b0b8..87e554d 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
@@ -48,7 +48,7 @@
 export class ManagementComponent implements OnInit {
   public user: string = '';
   public healthStatus: GeneralEnvironmentStatus;
-  public anyEnvInProgress: boolean = false;
+  // public anyEnvInProgress: boolean = false;
   public dialogRef: any;
 
   constructor(
@@ -78,6 +78,7 @@
       .environmentManagement(
         $event.environment.user,
         $event.action,
+        $event.environment.project,
         $event.environment.type === 'edge node' ? 'edge' : $event.environment.name,
         $event.resource ? $event.resource.computational_name : null
       ).subscribe(
@@ -85,9 +86,9 @@
         error => this.toastr.error('Environment management failed!', 'Oops!'));
   }
 
-  showBackupDialog() {
-    this.dialog.open(BackupDilogComponent, { panelClass: 'modal-sm' });
-  }
+  // showBackupDialog() {
+  //   this.dialog.open(BackupDilogComponent, { panelClass: 'modal-sm' });
+  // }
 
   showEndpointsDialog() {
     this.dialog.open(EndpointsComponent, { panelClass: 'modal-xl-s' })
@@ -97,23 +98,22 @@
   openManageEnvironmentDialog() {
     this.projectService.getProjectsList().subscribe(projectsList => {
       this.getTotalBudgetData().subscribe(total => {
-        this.dialogRef = this.dialog.open(ManageEnvironmentComponent, { data: { projectsList, total }, panelClass: 'modal-xl-s' });
-        this.dialogRef.componentInstance.manageEnv.subscribe((data) => this.manageEnvironment(data));
+        this.dialogRef = this.dialog.open(ManageEnvironmentComponent, { data: { projectsList, total }, panelClass: 'modal-sm' });
         this.dialogRef.afterClosed().subscribe(result => result && this.setBudgetLimits(result));
       }, () => this.toastr.error('Failed users list loading!', 'Oops!'));
     });
   }
 
-  openSsnMonitorDialog() {
-    this.dialog.open(SsnMonitorComponent, { panelClass: 'modal-lg' });
-  }
-
-  isEnvironmentsInProgress(exploratory): boolean {
-    return exploratory.some(item => {
-      return item.exploratory.some(el => el.status === 'creating' || el.status === 'starting' ||
-        el.resources.some(elem => elem.status === 'creating' || elem.status === 'starting' || elem.status === 'configuring'));
-    });
-  }
+  // openSsnMonitorDialog() {
+  //   this.dialog.open(SsnMonitorComponent, { panelClass: 'modal-lg' });
+  // }
+  //
+  // isEnvironmentsInProgress(exploratory): boolean {
+  //   return exploratory.some(item => {
+  //     return item.exploratory.some(el => el.status === 'creating' || el.status === 'starting' ||
+  //       el.resources.some(elem => elem.status === 'creating' || elem.status === 'starting' || elem.status === 'configuring'));
+  //   });
+  // }
 
   setBudgetLimits($event) {
     this.projectService.updateProjectsBudget($event.projects).subscribe((result: any) => {
@@ -126,47 +126,47 @@
     }, error => this.toastr.error(error.message, 'Oops!'));
   }
 
-  manageEnvironment(event: { action: string, project: any }) {
-    if (event.action === 'stop')
-      this.projectService.stopProjectAction(event.project.project_name)
-        .subscribe(() => this.handleSuccessAction(event.action), error => this.toastr.error(error.message, 'Oops!'));
+  // manageEnvironment(event: { action: string, project: any }) {
+  //   if (event.action === 'stop')
+  //     this.projectService.stopProjectAction(event.project.project_name)
+  //       .subscribe(() => this.handleSuccessAction(event.action), error => this.toastr.error(error.message, 'Oops!'));
+  //
+  //   if (event.action === 'terminate')
+  //     this.projectService.deleteProject(event.project.project_name)
+  //       .subscribe(() => this.handleSuccessAction(event.action), error => this.toastr.error(error.message, 'Oops!'));
+  // }
 
-    if (event.action === 'terminate')
-      this.projectService.deleteProject(event.project.project_name)
-        .subscribe(() => this.handleSuccessAction(event.action), error => this.toastr.error(error.message, 'Oops!'));
-  }
+  // handleSuccessAction(action) {
+  //   this.toastr.success(`Action ${action} is processing!`, 'Processing!');
+  //   this.projectService.getProjectsManagingList().subscribe(data => {
+  //     this.dialogRef.componentInstance.data.projectsList = data;
+  //     this.dialogRef.componentInstance.setProjectsControl();
+  //   });
+  //   this.buildGrid();
+  // }
+  //
+  // get creatingBackup(): boolean {
+  //   return this.backupService.inProgress;
+  // }
 
-  handleSuccessAction(action) {
-    this.toastr.success(`Action ${action} is processing!`, 'Processing!');
-    this.projectService.getProjectsList().subscribe(data => {
-      this.dialogRef.componentInstance.data.projectsList = data
-      this.dialogRef.componentInstance.setProjectsControl();
-    });
-    this.buildGrid()
-  }
-
-  get creatingBackup(): boolean {
-    return this.backupService.inProgress;
-  }
-
-  private getExploratoryList() {
-    this.userResourceService.getUserProvisionedResources()
-      .subscribe((result) => this.anyEnvInProgress = this.isEnvironmentsInProgress(
-        ExploratoryModel.loadEnvironments(result)));
-  }
+  // private getExploratoryList() {
+  //   this.userResourceService.getUserProvisionedResources()
+  //     .subscribe((result) => this.anyEnvInProgress = this.isEnvironmentsInProgress(
+  //       ExploratoryModel.loadEnvironments(result)));
+  // }
 
   private getEnvironmentHealthStatus() {
     this.healthStatusService
       .getEnvironmentStatuses()
       .subscribe((status: GeneralEnvironmentStatus) => {
         this.healthStatus = status;
-        this.getExploratoryList();
+        // this.getExploratoryList();
       });
   }
 
-  private getActiveUsersList() {
-    return this.healthStatusService.getActiveUsers();
-  }
+  // private getActiveUsersList() {
+  //   return this.healthStatusService.getActiveUsers();
+  // }
 
   private getTotalBudgetData() {
     return this.healthStatusService.getTotalBudgetData();
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts
index 86d553f..b4f0701 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts
@@ -27,6 +27,7 @@
     public ip: string,
     public type?: string,
     public project?: string,
+    public cloud_provider?: string
   ) { }
 
   public static loadEnvironments(data: Array<any>) {
@@ -40,6 +41,7 @@
         value.public_ip,
         value.resource_type,
         value.project,
+        value.cloud_provider
       ));
     }
   }
@@ -67,6 +69,7 @@
 
 export interface GeneralEnvironmentStatus {
   admin: boolean;
+  projectAdmin: boolean;
   billingEnabled: boolean;
   billingQuoteUsed: number;
   list_resources: any;
@@ -99,4 +102,4 @@
     this.statuses = [];
     this.resources = [];
   }
-}
\ No newline at end of file
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/index.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/index.ts
index f03b47e..609ea59 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/index.ts
@@ -30,6 +30,7 @@
 
 import { ProjectComponent, EditProjectComponent } from './project.component';
 import { ProjectDataService } from './project-data.service';
+import {BubbleModule} from "../../shared/bubble";
 
 @NgModule({
   imports: [
@@ -38,11 +39,12 @@
     ReactiveFormsModule,
     MaterialModule,
     FormControlsModule,
-    UnderscorelessPipeModule
+    UnderscorelessPipeModule,
+    BubbleModule
   ],
   declarations: [ProjectComponent, EditProjectComponent, ProjectFormComponent, ProjectListComponent],
   entryComponents: [EditProjectComponent],
   providers: [ProjectDataService],
   exports: [ProjectComponent]
 })
-export class ProjectModule { }
\ No newline at end of file
+export class ProjectModule { }
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
index ad877f2..014c89b 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
@@ -18,17 +18,20 @@
  */
 
 import { Injectable } from '@angular/core';
-import { BehaviorSubject } from 'rxjs';
+import { BehaviorSubject, of } from 'rxjs';
+import { mergeMap } from 'rxjs/operators';
 
-import { ProjectService } from '../../core/services';
+import { ProjectService, EndpointService } from '../../core/services';
 import { Project } from './project.component';
 
 @Injectable()
 export class ProjectDataService {
-
-  _projects = new BehaviorSubject<any>(null);
-
-  constructor(private projectService: ProjectService) {
+  public _projects = new BehaviorSubject<any>(null);
+  private endpointsList: any = [];
+  constructor(
+    private projectService: ProjectService,
+    private endpointService: EndpointService
+  ) {
     this.getProjectsList();
   }
 
@@ -37,7 +40,25 @@
   }
 
   private getProjectsList() {
-    this.projectService.getProjectsList().subscribe(
-      (response: Project[]) => this._projects.next(response));
+    this.endpointService.getEndpointsData().subscribe(list => this.endpointsList = list);
+    this.projectService.getProjectsList()
+      .pipe(
+        mergeMap((response: Project[]) => {
+          if (response && this.endpointsList.length) {
+            response.forEach(project => project.endpoints.forEach(endpoint => {
+              const filteredEndpoints = this.endpointsList.filter(v => v.name === endpoint.name);
+              endpoint.endpointStatus = filteredEndpoints.length ? filteredEndpoints[0].status : 'N/A';
+            }));
+          }
+          return of(response);
+        }))
+      .subscribe((response: Project[]) => this._projects.next(response));
   }
-}
\ No newline at end of file
+}
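Caveat on getProjectsList above: the endpoints request and the projects request are issued independently, so endpointsList can still be empty when the projects response arrives, leaving every endpoint at 'N/A'. One way to order the two is forkJoin; this is a sketch under the assumption that both service calls complete (HTTP observables do), not the committed behavior:

    import { forkJoin, Observable } from 'rxjs';
    import { map } from 'rxjs/operators';

    // Resolve both lists together, then annotate each project's endpoints.
    // endpoints$ / projects$ stand for the two service calls made above.
    function annotateProjects(endpoints$: Observable<any[]>, projects$: Observable<any[]>): Observable<any[]> {
      return forkJoin([endpoints$, projects$]).pipe(
        map(([endpoints, projects]) => {
          (projects || []).forEach(project => project.endpoints.forEach(endpoint => {
            const match = (endpoints || []).find(e => e.name === endpoint.name);
            endpoint.endpointStatus = match ? match.status : 'N/A';
          }));
          return projects;
        }));
    }

    // Usage sketch:
    //   annotateProjects(this.endpointService.getEndpointsData(), this.projectService.getProjectsList())
    //     .subscribe(projects => this._projects.next(projects));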
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.html
index c392b77..8c18309 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.html
@@ -19,8 +19,8 @@
 
 <form [formGroup]="projectForm" novalidate>
   <mat-horizontal-stepper #stepper class="stepper ani">
-    <mat-step>
-      <ng-template matStepLabel>Key upload</ng-template>
+    <mat-step [completed]='false'>
+      <ng-template matStepLabel>Key upload</ng-template>
       <section class="inner-step mat-reset upload-key">
         <div class="form-block split">
           <div class="row-wrap">
@@ -57,7 +57,7 @@
         </div>
       </section>
     </mat-step>
-    <mat-step>
+    <mat-step [completed]='false'>
       <ng-template matStepLabel>Project</ng-template>
       <section class="inner-step mat-reset">
 
@@ -77,7 +77,7 @@
                   Project name can only contain letters and numbers
                 </span>
                 <span class="error" *ngIf="projectForm?.controls.name.hasError('limit')">
-                  Project name cannot be longer than {{ DICTIONARY.max_project_name_length}} characters
+                  Project name cannot be longer than {{ maxProjectNameLength }} characters
                 </span>
               </div>
             </div>
@@ -92,7 +92,8 @@
               <label class="label">Endpoints</label>
               <div class="control selector-wrapper">
                 <mat-form-field>
-                  <mat-select multiple formControlName="endpoints" placeholder="Select endpoints">
+                  <mat-select multiple formControlName="endpoints" placeholder="Select endpoints"
+                    panelClass="crete-project-dialog">
                     <mat-option class="multiple-select" disabled>
                       <a class="select ani" (click)="selectOptions(endpointsList, 'endpoints', 'all')">
                         <i class="material-icons">playlist_add_check</i>&nbsp;All
@@ -128,7 +129,7 @@
 
       </section>
     </mat-step>
-    <mat-step>
+    <mat-step [completed]='false'>
       <ng-template matStepLabel>Groups</ng-template>
       <div class="inner-step mat-reset">
         <div class="form-block split">
@@ -136,7 +137,8 @@
             <label class="label">Groups</label>
             <div class="control selector-wrapper">
               <mat-form-field>
-                <mat-select multiple formControlName="groups" placeholder="Select user groups">
+                <mat-select multiple formControlName="groups" placeholder="Select user groups"
+                  panelClass="crete-project-dialog">
                   <mat-option class="multiple-select" disabled>
                     <a class="select ani" (click)="selectOptions(groupsList, 'groups', 'all')">
                       <i class="material-icons">playlist_add_check</i>&nbsp;All
@@ -158,6 +160,11 @@
             </div>
           </div>
           <div class="text-center m-bott-10">
+            <div class="control-group">
+              <mat-slide-toggle formControlName="shared_image_enabled" labelPosition="after">
+                <span class="hold-label">Use shared image</span>
+              </mat-slide-toggle>
+            </div>
             <button mat-raised-button type="button" class="butt" [disabled]="item" (click)="reset()">Clear</button>
             <button mat-raised-button matStepperPrevious class="butt"><i
                 class="material-icons">keyboard_arrow_left</i>Back</button>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.ts
index 009456c..683e7d7 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-form/project-form.component.ts
@@ -46,6 +46,7 @@
   public projectList: Project[] = [];
   public accessKeyValid: boolean;
   public keyLabel: string = '';
+  public maxProjectNameLength: number = 10;
 
   @Input() item: any;
   @Output() update: EventEmitter<{}> = new EventEmitter();
@@ -95,6 +96,7 @@
   }
 
   public reset() {
+    this.stepper.reset();
     this.keyLabel = '';
     this.initFormModel();
   }
@@ -141,25 +143,28 @@
     this.projectForm.controls[key].setValue(select ? filter : []);
   }
 
   private initFormModel(): void {
     this.projectForm = this._fb.group({
       'key': ['', Validators.required],
       'name': ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.projectName), this.checkDuplication.bind(this), this.providerMaxLength.bind(this)])],
       'endpoints': [[], Validators.required],
       'tag': ['', Validators.compose([Validators.required, Validators.pattern(PATTERNS.projectName)])],
-      'groups': [[], Validators.required]
+      'groups': [[], Validators.required],
+      'shared_image_enabled': [false, Validators.required]
     });
   }
 
-  public editSpecificProject(item: Project) {
-    let endpoints = item.endpoints.map((item: any) => item.name);
+  public editSpecificProject(item) {
+    const endpoints = item.endpoints.map((endpoint: any) => endpoint.name);
 
     this.projectForm = this._fb.group({
       'key': [''],
       'name': [item.name, Validators.required],
       'endpoints': [endpoints],
       'tag': [item.tag, Validators.required],
-      'groups': [item.groups, Validators.required]
+      'groups': [item.groups, Validators.required],
+      'shared_image_enabled': [item.sharedImageEnabled, Validators.required]
     });
   }
 
@@ -200,6 +205,6 @@
   }
 
   private providerMaxLength(control) {
-    return control.value.length <= DICTIONARY.max_project_name_length ? null : { limit: true };
+    return (control.value || '').length <= this.maxProjectNameLength ? null : { limit: true };
   }
 }
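providerMaxLength now compares against the hard-coded maxProjectNameLength field. A parameterized validator keeps the 'limit' error key that the template checks via hasError('limit') while making the bound explicit; a sketch (limitLength is an illustrative name, not part of the component):

    import { AbstractControl, ValidationErrors, ValidatorFn } from '@angular/forms';

    // Standalone equivalent of providerMaxLength with the limit as a parameter;
    // guards against a null control value.
    export function limitLength(max: number): ValidatorFn {
      return (control: AbstractControl): ValidationErrors | null =>
        (control.value || '').length <= max ? null : { limit: true };
    }

    // Usage: 'name': ['', [Validators.required, limitLength(this.maxProjectNameLength)]]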
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
index 70f2025..d8f697f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
@@ -24,7 +24,7 @@
   </ng-container>
 
   <ng-container matColumnDef="groups">
-    <th mat-header-cell *matHeaderCellDef class="groups"> Groups </th>
+    <th mat-header-cell *matHeaderCellDef class="groups"> Group </th>
     <td mat-cell *matCellDef="let element" class="groups">
       <mat-chip-list>
         <mat-chip *ngFor="let group of element.groups">{{ group }}</mat-chip>
@@ -35,59 +35,71 @@
   <ng-container matColumnDef="endpoints">
     <th mat-header-cell *matHeaderCellDef class="endpoints">
       <span class="label-endpoint"> Endpoint </span>
-      <span class="label-status"> Endpoint status </span>
+      <span class="label-endpoint-status"> Endpoint status </span>
+      <span class="label-status"> Edge node status </span>
     </th>
     <td mat-cell *matCellDef="let element" class="source endpoints">
-      <!-- <mat-chip-list>
-        <mat-chip *ngFor="let endpoint of element.endpoints">{{ endpoint }}</mat-chip>
-      </mat-chip-list> -->
       <div *ngIf="!element.endpoints?.length; else list">
         <span *ngIf="!element.endpoints.length" class="no-details">no details</span>
       </div>
       <ng-template #list>
         <div *ngFor="let endpoint of element.endpoints" class="resource-wrap">
           <div class="resource-name">
-            <a class="detailed-link">
+            <a class="project-endpoint-name">
               {{ endpoint.name }}
             </a>
           </div>
-          <span ngClass="{{toEndpointStatus(endpoint.status).toLowerCase() || ''}}"
-            class="status resource-status">{{ toEndpointStatus(endpoint.status).toLowerCase() }}</span>
-          <div class="resource-actions">
-            <a class="start-stop-action">
-              <i class="material-icons" (click)="toggleEndpointAction(element, 'stop', endpoint)"
-                *ngIf="endpoint.status === 'RUNNING' || endpoint.status === 'STOPPING'"
-                [ngClass]="{'not-active' : endpoint.status === 'STOPPING'}">pause_circle_outline</i>
-            </a>
-            <a class="start-stop-action">
-              <i class="material-icons" (click)="toggleEndpointAction(element, 'start', endpoint)"
-                *ngIf="endpoint.status === 'STOPPED'">play_circle_outline</i>
-            </a>
-
-            <a class="remove_butt" (click)="toggleEndpointAction(element, 'terminate', endpoint)"
-              [ngClass]="{ 'disabled' : endpoint.status !== 'RUNNING' && endpoint.status !== 'STOPPED' }">
-              <i class="material-icons">highlight_off</i>
-            </a>
+          <div class="resource-status">
+            <span [ngClass]="{'active' : endpoint.endpointStatus === 'ACTIVE', 'failed': endpoint.endpointStatus === 'INACTIVE'}">
+              {{ endpoint.endpointStatus | titlecase }}
+            </span>
           </div>
+
+          <span class="status resource-status"
+            ngClass="{{endpoint.status.toLowerCase() || ''}}">{{ endpoint.status.toLowerCase() }}</span>
         </div>
       </ng-template>
     </td>
   </ng-container>
 
   <ng-container matColumnDef="actions">
-    <th mat-header-cell *matHeaderCellDef class="project-actions"></th>
-    <td mat-cell *matCellDef="let element" class="project-actions">
-      <span>
-        <a (click)="editProject(element)">
-          <mat-icon>mode_edit</mat-icon>
-        </a>
-      </span>
-      <span>
-        <a (click)="deleteProject(element)" class="action"
-          [ngClass]="{'not-allowed' : isInProgress(element) || !isActiveEndpoint(element) }">
-          <mat-icon>delete_forever</mat-icon>
-        </a>
-      </span>
+    <th mat-header-cell *matHeaderCellDef class="project-actions">
+      <span class="label"> Actions </span>
+    </th>
+    <td mat-cell *matCellDef="let element" class="settings">
+      <span #settings (click)="actions.toggle($event, settings)" class="actions"></span>
+      <bubble-up #actions class="list-menu" position="bottom-left" alternative="top-left">
+        <ul class="list-unstyled">
+          <div class="active-items"></div>
+          <li class="project-seting-item" *ngIf="areStoppedEndpoints(element)" (click)="openEdgeDialog('start', element)">
+            <i class="material-icons">play_circle_outline</i>
+            <a class="action">
+              Start edge node
+            </a>
+          </li>
+          <li class="project-seting-item" *ngIf="areStartedEndpoints(element)" (click)="openEdgeDialog('stop', element )">
+            <i class="material-icons">pause_circle_outline</i>
+            <a class="action" >
+              Stop edge node
+            </a>
+          </li>
+          <li class="project-seting-item " *ngIf="areStoppedEndpoints(element) || areStartedEndpoints(element)" (click)="openEdgeDialog('terminate', element)">
+            <i class="material-icons">phonelink_off</i>
+            <a class="action">
+              Terminate edge node
+            </a>
+          </li>
+          <li class="project-seting-item" (click)="editProject(element)">
+            <i class="material-icons">mode_edit</i>
+            <a >
+              Edit project
+            </a>
+          </li>
+        </ul>
+      </bubble-up>
     </td>
 
   </ng-container>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
index c37e628..efe9ba3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
@@ -30,26 +30,33 @@
   }
 
   .endpoints {
-    width: 25%;
+    width: 45%;
     padding: 15px 0;
 
     .resource-wrap {
       .resource-name {
-        width: 50%;
+        width: 30%;
+        padding-left: 0;
+      }
+      .resource-name, .resource-status {
+        width: 30%;
         padding-left: 0;
       }
     }
 
-    .label-endpoint {
+    .label-endpoint, .label-endpoint-status {
       display: inline-block;
-      width: 50%;
+      width: 30%;
     }
 
     .label-status {
-      padding-left: 5px;
+      width: 30%;
     }
 
     &.source {
+      &.endpoints .resource-wrap {
+        justify-content: flex-start;
+      }
       .no-details {
         padding-left: 0;
       }
@@ -57,7 +64,7 @@
   }
 
   .groups {
-    width: 25%;
+    width: 20%;
     padding: 10px 0;
   }
 
@@ -72,7 +79,13 @@
     vertical-align: top;
     padding: 10px 24px;
 
-    span {
+    &.mat-header-cell {
+      padding-top: 19px;
+      padding-right: 13px;
+      color: rgba(0, 0, 0, .54);
+    }
+
+    &:not(.mat-header-cell) span {
       transition: all .5s ease-in-out;
       cursor: pointer;
 
@@ -87,3 +100,48 @@
     }
   }
 }
+
+td.settings {
+  position: relative;
+  vertical-align: middle !important;
+  text-align: right;
+  .actions {
+    background-image: url(../../../../assets/svg/settings_icon.svg);
+    width: 16px;
+    height: 16px;
+    display: inline-block;
+    text-align: center;
+    cursor: pointer;
+  }
+}
+.project-setting-item {
+  display: flex;
+  padding: 10px;
+  align-items: center;
+  border-bottom: 1px solid #edf1f5;
+  cursor: pointer;
+  color: #577289;
+
+  &:hover {
+    color: #36afd5;
+    transition: all .45s ease-in-out;
+  }
+
+  a {
+    padding-left: 5px;
+  }
+}
+.material-icons {
+  font-size: 18px;
+  padding-top: 1px;
+}
+
+.list-menu {
+  min-width: 190px;
+}
+
+.project-endpoint-name {
+  color: #577289;
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.ts
index f7235e6..9238b7f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.ts
@@ -17,15 +17,19 @@
  * under the License.
  */
 
-import { Component, OnInit, Output, EventEmitter, OnDestroy } from '@angular/core';
+import { Component, OnInit, Output, EventEmitter, OnDestroy, Inject } from '@angular/core';
 import { ToastrService } from 'ngx-toastr';
 import { MatTableDataSource } from '@angular/material/table';
 import { Subscription } from 'rxjs';
+import { MatDialogRef, MAT_DIALOG_DATA, MatDialog } from '@angular/material/dialog';
 
 import { ProjectDataService } from '../project-data.service';
-import { ProjectService } from '../../../core/services';
 import { Project, Endpoint } from '../project.component';
 import { CheckUtils } from '../../../core/util';
+import { ProgressBarService } from '../../../core/services/progress-bar.service';
+import { EdgeActionDialogComponent } from '../../../shared/modal-dialog/edge-action-dialog';
+import { EndpointService } from '../../../core/services';
+
 
 @Component({
   selector: 'project-list',
@@ -39,54 +43,57 @@
   projectList: Project[];
 
   @Output() editItem: EventEmitter<{}> = new EventEmitter();
-  @Output() deleteItem: EventEmitter<{}> = new EventEmitter();
   @Output() toggleStatus: EventEmitter<{}> = new EventEmitter();
-
   private subscriptions: Subscription = new Subscription();
 
   constructor(
     public toastr: ToastrService,
     private projectDataService: ProjectDataService,
-    private projectService: ProjectService
-  ) { }
+    private progressBarService: ProgressBarService,
+    @Inject(MAT_DIALOG_DATA) public data: any,
+    public dialogRef: MatDialogRef<ProjectListComponent>,
+    public dialog: MatDialog,
+  ) { }
 
 
   ngOnInit() {
-    this.subscriptions.add(this.projectDataService._projects.subscribe((value: Project[]) => {
-      this.projectList = value;
-      if (value) this.dataSource = new MatTableDataSource(value)
-    }));
+    this.getProjectList();
   }
 
   ngOnDestroy() {
     this.subscriptions.unsubscribe();
   }
 
+  private getProjectList() {
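+    // Defer one tick so the progress-bar toggle does not collide with the current change-detection cycle.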
+    setTimeout(() => this.progressBarService.startProgressBar(), 0);
+    this.subscriptions.add(this.projectDataService._projects.subscribe((value: Project[]) => {
+      this.projectList = value;
+      if (value) this.dataSource = new MatTableDataSource(value);
+      this.progressBarService.stopProgressBar();
+    }, () => this.progressBarService.stopProgressBar()));
+  }
+
   public showActiveInstances(): void {
-    console.log(this.projectList);
     const filteredList = this.projectList.map(project => {
-      project.endpoints = project.endpoints.filter((endpoint: Endpoint) => endpoint.status !== 'TERMINATED' && endpoint.status !== 'TERMINATING' && endpoint.status !== 'FAILED')
+      project.endpoints = project.endpoints.filter((endpoint: Endpoint) => endpoint.status !== 'TERMINATED' && endpoint.status !== 'TERMINATING' && endpoint.status !== 'FAILED');
       return project;
-    })
+    });
 
     this.dataSource = new MatTableDataSource(filteredList);
   }
 
   public toggleEndpointAction(project, action, endpoint) {
-    this.toggleStatus.emit({ project, endpoint, action });
+    this.toggleStatus.emit({project, endpoint, action});
   }
 
   public editProject(item: Project[]) {
     this.editItem.emit(item);
   }
 
-  public deleteProject(item: Project[]) {
-    this.deleteItem.emit(item);
-  }
-
   public isInProgress(project) {
     if (project)
-      return project.endpoints.some(e => e.status !== 'RUNNING' && e.status !== 'STOPPED' && e.status !== 'TERMINATED' && e.status !== 'FAILED')
+      return project.endpoints.some(e => e.status !== 'RUNNING' && e.status !== 'STOPPED' && e.status !== 'TERMINATED' && e.status !== 'FAILED');
   }
 
   public isActiveEndpoint(project) {
@@ -97,4 +104,33 @@
   public toEndpointStatus(status) {
     return CheckUtils.endpointStatus[status] || status;
   }
-}
\ No newline at end of file
+
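+  // Offer only the endpoints that are eligible for the requested edge node action.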
+  public openEdgeDialog(action, project) {
+    const endpoints = project.endpoints.filter(endpoint => {
+      if (action === 'stop') {
+        return endpoint.status === 'RUNNING';
+      }
+      if (action === 'start') {
+        return endpoint.status === 'STOPPED';
+      }
+      if (action === 'terminate') {
+        return endpoint.status === 'RUNNING' || endpoint.status === 'STOPPED';
+      }
+    });
+    this.dialog.open(EdgeActionDialogComponent, {data: {type: action, item: endpoints}, panelClass: 'modal-sm'})
+      .afterClosed().subscribe(endpoint => {
+        if (endpoint && endpoint.length) {
+          this.toggleStatus.emit({project, endpoint, action});
+        }
+      }, error => this.toastr.error(error.message || `Endpoint ${action} failed!`, 'Oops!'));
+  }
+
+  public areStartedEndpoints(project) {
+    return project.endpoints.some(endpoint => endpoint.status === 'RUNNING');
+  }
+
+  public areStoppedEndpoints(project) {
+    return project.endpoints.some(endpoint => endpoint.status === 'STOPPED');
+  }
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
index b4ba7df..3be7a10 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
@@ -20,8 +20,12 @@
 
 <div *ngIf="projectList" class="base-retreat">
   <div class="sub-nav">
-    <div>
-      <button mat-raised-button class="butt butt-create" (click)="createProject()" [disabled]="!projectList.length">
+    <div
+      matTooltip="Only admin can create new project."
+      matTooltipPosition="above"
+      [matTooltipDisabled]="healthStatus?.admin"
+    >
+      <button mat-raised-button class="butt butt-create" (click)="createProject()" [disabled]="!projectList.length || (!healthStatus?.admin && healthStatus?.projectAdmin)">
         <i class="material-icons">add</i>Create new
       </button>
     </div>
@@ -47,8 +51,7 @@
   </mat-card>
 
   <div [hidden]="!projectList.length">
-    <project-list (editItem)="editProject($event)" (deleteItem)="deleteProject($event)"
-      (toggleStatus)="toggleStatus($event)">
+    <project-list (editItem)="editProject($event)" (toggleStatus)="toggleStatus($event)">
     </project-list>
   </div>
 </div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
index 9c7dae6..d1b172e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
@@ -23,9 +23,10 @@
 import { ToastrService } from 'ngx-toastr';
 
 import { ProjectDataService } from './project-data.service';
-import { HealthStatusService, ProjectService } from '../../core/services';
+import { HealthStatusService, ProjectService, UserResourceService } from '../../core/services';
 import { NotificationDialogComponent } from '../../shared/modal-dialog/notification-dialog';
 import { ProjectListComponent } from './project-list/project-list.component';
+import { ExploratoryModel } from '../../resources/resources-grid/resources-grid.model';
 
 export interface Endpoint {
   name: string;
@@ -35,9 +36,10 @@
 
 export interface Project {
   name: string;
-  endpoints: Endpoint[];
+  endpoints: any;
   tag: string;
   groups: string[];
+  shared_image_enabled?: boolean;
 }
 
 @Component({
@@ -49,6 +51,7 @@
   projectList: Project[] = [];
   healthStatus: any;
   activeFiltering: boolean = false;
+  resources: any = [];
 
   private subscriptions: Subscription = new Subscription();
 
@@ -59,7 +62,8 @@
     public toastr: ToastrService,
     private projectService: ProjectService,
     private projectDataService: ProjectDataService,
-    private healthStatusService: HealthStatusService
+    private healthStatusService: HealthStatusService,
+    private userResourceService: UserResourceService
   ) { }
 
   ngOnInit() {
@@ -69,12 +73,20 @@
         if (value) this.projectList = value;
       }));
     this.refreshGrid();
+    this.getResources();
   }
 
   ngOnDestroy() {
     this.subscriptions.unsubscribe();
   }
 
+  private getResources() {
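+    // Cache the user's provisioned resources; the terminate confirmation lists affected notebooks from this cache.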
+    this.userResourceService.getUserProvisionedResources()
+      .subscribe((result: any) => {
+        this.resources = ExploratoryModel.loadEnvironments(result);
+      });
+  }
+
   refreshGrid() {
     this.projectDataService.updateProjects();
     this.activeFiltering = false;
@@ -84,7 +96,6 @@
     if (this.projectList.length)
       this.dialog.open(EditProjectComponent, { data: { action: 'create', item: null }, panelClass: 'modal-xl-s' })
         .afterClosed().subscribe(() => {
-          console.log('Create project');
           this.getEnvironmentHealthStatus();
         });
   }
@@ -103,37 +114,36 @@
       });
   }
 
-  public deleteProject($event) {
-    this.dialog.open(NotificationDialogComponent, { data: { type: 'confirmation', item: $event }, panelClass: 'modal-sm' })
-      .afterClosed().subscribe(result => {
-        result && this.projectService.deleteProject($event.name).subscribe(() => {
-          this.refreshGrid();
-        });
-      });
-  }
-
   public toggleStatus($event) {
-    const data = { 'project_name': $event.project.name, endpoint: $event.endpoint.name };
-
-    if ($event.action === 'stop' || $event.action === 'terminate') {
-      this.dialog.open(NotificationDialogComponent, {
-        data: {
-          type: 'confirmation',
-          item: $event.endpoint, action: $event.action === 'stop' ? 'stopped' : 'terminated'
-        }, panelClass: 'modal-sm'
-      })
-        .afterClosed().subscribe(result => {
-          result && this.toggleStatusRequest(data, $event.action);
-        }, error => this.toastr.error(error.message, 'Oops!'));
-    } else {
+    const data = { 'project_name': $event.project.name, endpoint: $event.endpoint.map(endpoint => endpoint.name)};
       this.toggleStatusRequest(data, $event.action);
-    }
   }
 
   private toggleStatusRequest(data, action) {
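+    // Terminating an edge node also terminates the project's active notebooks on the selected endpoints, so list them for confirmation.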
+    if (action === 'terminate') {
+      const projectsResources = this.resources.filter(resource => resource.project === data.project_name);
+      const activeProjectsResources = projectsResources.length ? projectsResources[0].exploratory
+        .filter(expl => expl.status !== 'terminated' && expl.status !== 'terminating' && expl.status !== 'failed') : [];
+      let termResources = [];
+      data.endpoint.forEach(v => {
+        termResources = [...termResources, ...activeProjectsResources.filter(resource => resource.endpoint === v)];
+      });
+
+      this.dialog.open(NotificationDialogComponent, {
+        data: {
+          type: 'terminateNode', item: {action: data, resources: termResources.map(resource => resource.name)}
+        }, panelClass: 'modal-sm'
+      })
+        .afterClosed().subscribe(result => {
+          result && this.edgeNodeAction(data, action);
+        });
+    } else {
+      this.edgeNodeAction(data, action);
+    }
+  }
+
+  private edgeNodeAction(data, action) {
     this.projectService.toggleProjectStatus(data, action).subscribe(() => {
       this.refreshGrid();
-      this.toastr.success(`Endpoint ${this.toEndpointAction(action)} is in progress!`, 'Processing!');
+      this.toastr.success(`Edge node ${this.toEndpointAction(action)} is in progress!`, 'Processing!');
     }, error => this.toastr.error(error.message, 'Oops!'));
   }
 
@@ -144,9 +154,11 @@
 
   private toEndpointAction(action) {
     if (action === 'start') {
-      return 'connect';
+      return 'starting';
     } else if (action === 'stop') {
-      return 'disconnect';
+      return 'stopping';
+    } else if (action === 'terminate') {
+      return 'terminating';
     } else {
       return action;
     }
@@ -179,4 +191,4 @@
     public dialogRef: MatDialogRef<EditProjectComponent>,
     @Inject(MAT_DIALOG_DATA) public data: any
   ) { }
-}
\ No newline at end of file
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
index 72a2288..5c73329 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
@@ -19,8 +19,12 @@
 
 <div class="manage-roles base-retreat">
   <div class="sub-nav">
-    <div>
-      <button mat-raised-button class="butt add-group" (click)="stepperView = !stepperView">
+    <div matTooltip="Only admin can add group."
+         matTooltipPosition="above"
+         [matTooltipDisabled]="healthStatus?.admin"
+    >
+      <button mat-raised-button class="butt add-group" (click)="stepperView = !stepperView" [disabled]="!healthStatus?.admin">
         <i class="material-icons">people_outline</i>Add group
       </button>
     </div>
@@ -29,10 +33,10 @@
 
   <mat-card *ngIf="stepperView" class="m-top-10">
     <mat-horizontal-stepper #stepper class="stepper ani">
-      <mat-step>
+      <mat-step [completed]='false'>
         <ng-template matStepLabel>Groups</ng-template>
         <div class="inner-step mat-reset">
-          <input [validator]="groupValidarion()" type="text" placeholder="Enter group name" [(ngModel)]="setupGroup"
+          <input [validator]="groupValidation()" type="text" placeholder="Enter group name" [(ngModel)]="setupGroup"
             #setupGroupName="ngModel">
           <div class="error" *ngIf="setupGroupName.errors?.patterns && setupGroupName.dirty">Group name can only
             contain letters, numbers, hyphens and '_'</div>
@@ -45,7 +49,8 @@
               class="material-icons">keyboard_arrow_right</i></button>
         </div>
       </mat-step>
-      <mat-step>
+
+      <mat-step [completed]='false'>
         <ng-template matStepLabel>Users</ng-template>
         <div class="inner-step mat-reset">
           <input type="text" placeholder="Enter user login" [(ngModel)]="setupUser">
@@ -58,29 +63,19 @@
               class="material-icons">keyboard_arrow_right</i></button>
         </div>
       </mat-step>
-      <mat-step>
+
+      <mat-step [completed]='false'>
         <ng-template matStepLabel>Roles</ng-template>
         <div class="inner-step mat-reset roles">
           <div class="selector-wrapper">
-            <mat-form-field>
-              <mat-select multiple [compareWith]="compareObjects" name="roles" [(value)]="setupRoles"
-                disableOptionCentering placeholder="Select roles">
-                <mat-option class="multiple-select" disabled>
-                  <a class="select ani" (click)="selectAllOptions(setupRoles, rolesList)">
-                    <i class="material-icons">playlist_add_check</i>&nbsp;All
-                  </a>
-                  <a class="deselect ani" (click)="selectAllOptions(setupRoles)">
-                    <i class="material-icons">clear</i>&nbsp;None
-                  </a>
-                </mat-option>
-                <mat-option *ngFor="let role of rolesList" [value]="role">
-                  {{ role }}
-                </mat-option>
-              </mat-select>
-              <button class="caret">
-                <i class="material-icons">keyboard_arrow_down</i>
-              </button>
-            </mat-form-field>
+            <multi-level-select-dropdown
+              (selectionChange)="onUpdate($event)"
+              name="roles"
+              [items]="rolesList"
+              [model]="setupRoles"
+              [isAdmin]="healthStatus?.admin"
+            >
+            </multi-level-select-dropdown>
           </div>
         </div>
         <div class="text-center m-bott-10">
@@ -88,9 +83,10 @@
               class="material-icons">keyboard_arrow_left</i>Back</button>
           <button mat-raised-button (click)="resetDialog()" class="butt">Cancel</button>
           <button mat-raised-button (click)="manageAction('create', 'group')" class="butt butt-success"
-            [disabled]="!setupGroup || setupGroupName.errors?.patterns || setupGroupName.errors?.duplicate || !setupRoles.length > 0">Create</button>
+            [disabled]="!setupGroup || setupGroupName.errors?.patterns || setupGroupName.errors?.duplicate || !setupRoles.length">Create</button>
         </div>
       </mat-step>
+
     </mat-horizontal-stepper>
   </mat-card>
   <mat-divider></mat-divider>
@@ -106,27 +102,14 @@
         <th mat-header-cell *matHeaderCellDef class="roles"> Roles </th>
         <td mat-cell *matCellDef="let element" class="roles">
           <div class="inner-step mat-reset">
-            <div class="selector-wrapper-edit">
-              <mat-form-field class="select">
-                <mat-select multiple [compareWith]="compareObjects" name="selected_roles" disableOptionCentering
-                  [(value)]="element.selected_roles" placeholder="Select roles" class="roles-select">
-                  <mat-option class="multiple-select" disabled>
-                    <a class="select ani" (click)="selectAllOptions(element, rolesList, 'selected_roles')">
-                      <i class="material-icons">playlist_add_check</i>&nbsp;All
-                    </a>
-                    <a class="deselect ani" (click)="selectAllOptions(element, null, 'selected_roles')">
-                      <i class="material-icons">clear</i>&nbsp;None
-                    </a>
-                  </mat-option>
-                  <mat-option *ngFor="let role of rolesList" [value]="role">
-                    {{ role }}
-                  </mat-option>
-                </mat-select>
-                <button class="caret">
-                  <i class="material-icons">keyboard_arrow_down</i>
-                </button>
-              </mat-form-field>
-            </div>
+              <multi-level-select-dropdown
+                (selectionChange)="onUpdate($event)"
+                [type]="element.group"
+                [items]="rolesList"
+                [model]="element.selected_roles"
+                [isAdmin]="healthStatus?.admin"
+              >
+              </multi-level-select-dropdown>
           </div>
         </td>
       </ng-container>
@@ -155,15 +138,29 @@
       <ng-container matColumnDef="actions">
         <th mat-header-cell *matHeaderCellDef class="actions"></th>
         <td mat-cell *matCellDef="let element" class="actions">
-          <span (click)="manageAction('delete', 'group', element)" class="reset ani">
-            <mat-icon>delete_forever</mat-icon>
-          </span>
-          <span class="apply ani" matTooltip="Group cannot be updated without any selected role"
-            matTooltipPosition="above" [matTooltipDisabled]="element.selected_roles.length > 0"
-            [ngClass]="{ 'not-allowed' : !element.selected_roles.length }"
-            (click)="manageAction('update', 'group', element)">
-            <mat-icon>done</mat-icon>
-          </span>
+          <div class="actions-wrapper">
+            <span class="action-disabled"
+              matTooltip="Only admin can delete group."
+              matTooltipPosition="above"
+              [matTooltipDisabled]="healthStatus?.admin">
+              <span
+                (click)="manageAction('delete', 'group', element)"
+                class="reset ani"
+                [ngClass]="{ 'not-allowed' : !healthStatus?.admin }">
+                <mat-icon>delete_forever</mat-icon>
+              </span>
+            </span>
+
+            <span class="apply ani big-icon" matTooltip="Group cannot be updated without any selected role"
+              matTooltipPosition="above" [matTooltipDisabled]="element.selected_roles.length > 0"
+              [ngClass]="{ 'not-allowed' : !element.selected_roles.length || isGroupUnchanged(element) }"
+              (click)="manageAction('update', 'group', element)">
+              <mat-icon [ngClass]="{'big-icon': !isGroupUnchanged(element) && element.selected_roles.length}">done</mat-icon>
+            </span>
+          </div>
         </td>
       </ng-container>
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
index dd14655..1167084 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
@@ -88,17 +88,23 @@
   }
 }
 
+.mat-horizontal-content-container {
+  overflow: visible !important;
+}
+
 .selector-wrapper {
   display: flex;
   align-self: center;
   width: 490px;
   height: 36px;
-  padding-left: 10px;
+  padding-left: 0;
   font-family: 'Open Sans', sans-serif;
   font-size: 15px;
   font-weight: 300;
   box-shadow: 0 3px 1px -2px rgba(0, 0, 0, 0.2), 0 2px 2px 0 rgba(0, 0, 0, 0.14), 0 1px 5px 0 rgba(0, 0, 0, 0.12);
-
+  multi-level-select-dropdown {
+    width: 100%;
+  }
   mat-form-field {
     width: 100%;
 
@@ -137,7 +143,6 @@
 }
 
 .roles {
-  // width: 30%;
 
   .selector-wrapper-edit {
     position: relative;
@@ -198,6 +203,7 @@
   }
 }
 
+
 .expanded-panel {
   display: flex;
   align-items: flex-end;
@@ -343,11 +349,11 @@
   }
 
   .roles {
-    width: 30%;
+    width: 35%;
   }
 
   .users {
-    width: 40%;
+    width: 35%;
   }
 
   .actions {
@@ -355,13 +361,29 @@
     width: 10%;
     text-align: center;
 
+    .actions-wrapper {
+      height: 41px;
+      display: flex;
+      align-items: center;
+      justify-content: flex-end;
+    }
+
     span {
-      transition: all .5s ease-in-out;
+      transition: all .35s ease-in-out;
       cursor: pointer;
 
+      &.action-disabled {
+        cursor: not-allowed;
+      }
+
       .mat-icon {
         font-size: 18px;
         padding-top: 12px;
+        &.big-icon {
+          font-size: 25px;
+          padding-top: 10px;
+          transition: .25s;
+        }
       }
 
       &:hover {
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
index 18e2fd5..2b1e26b 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
@@ -21,10 +21,11 @@
 import { ValidatorFn, FormControl } from '@angular/forms';
 import { MatDialog, MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog';
 import { ToastrService } from 'ngx-toastr';
-
-import { RolesGroupsService, HealthStatusService } from '../../core/services';
+import { RolesGroupsService, HealthStatusService, ApplicationSecurityService, AppRoutingService } from '../../core/services';
 import { CheckUtils } from '../../core/util';
 import { DICTIONARY } from '../../../dictionary/global.dictionary';
+import { ProgressBarService } from '../../core/services/progress-bar.service';
+import { ConfirmationDialogComponent, ConfirmationDialogType } from '../../shared/modal-dialog/confirmation-dialog';
 
 @Component({
   selector: 'dlab-roles',
@@ -36,11 +37,11 @@
 
   public groupsData: Array<any> = [];
   public roles: Array<any> = [];
-  public rolesList: Array<string> = [];
+  public rolesList: Array<any> = [];
   public setupGroup: string = '';
   public setupUser: string = '';
   public manageUser: string = '';
-  public setupRoles: Array<string> = [];
+  public setupRoles: Array<any> = [];
   public updatedRoles: Array<string> = [];
   public healthStatus: any;
   public delimitersRegex = /[-_]?/g;
@@ -49,32 +50,43 @@
   stepperView: boolean = false;
   displayedColumns: string[] = ['name', 'roles', 'users', 'actions'];
   @Output() manageRolesGroupAction: EventEmitter<{}> = new EventEmitter();
+  private startedGroups: Array<any>;
 
   constructor(
     public toastr: ToastrService,
     public dialog: MatDialog,
     private rolesService: RolesGroupsService,
-    private healthStatusService: HealthStatusService
+    private healthStatusService: HealthStatusService,
+    private progressBarService: ProgressBarService,
+    private applicationSecurityService: ApplicationSecurityService,
+    private appRoutingService: AppRoutingService,
   ) { }
 
   ngOnInit() {
-    this.openManageRolesDialog();
     this.getEnvironmentHealthStatus();
   }
 
   openManageRolesDialog() {
+    setTimeout(() => this.progressBarService.startProgressBar(), 0);
     this.rolesService.getGroupsData().subscribe(groups => {
       this.rolesService.getRolesData().subscribe(
         (roles: any) => {
           this.roles = roles;
-          this.rolesList = roles.map(role => role.description);
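+          // Build {role, type, cloud} entries and order them by type, then cloud, for the multi-level dropdown.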
+          this.rolesList = roles.map(role => ({role: role.description, type: role.type, cloud: role.cloud}));
+          this.rolesList.sort((a, b) =>
+            (a.type > b.type) ? 1 : (b.type > a.type) ? -1 : (a.cloud > b.cloud) ? 1 : (b.cloud > a.cloud) ? -1 : 0);
           this.updateGroupData(groups);
-
           this.stepperView = false;
         },
         error => this.toastr.error(error.message, 'Oops!'));
-    },
-      error => this.toastr.error(error.message, 'Oops!'));
+        this.progressBarService.stopProgressBar();
+      },
+      error => {
+        this.toastr.error(error.message, 'Oops!');
+        this.progressBarService.stopProgressBar();
+    });
   }
 
   getGroupsData() {
@@ -94,7 +106,7 @@
           action, type, value: {
             name: this.setupGroup,
             users: this.setupUser ? this.setupUser.split(',').map(elem => elem.trim()) : [],
-            roleIds: this.extractIds(this.roles, this.setupRoles)
+            roleIds: this.extractIds(this.roles, this.setupRoles.map(v => v.role))
           }
         });
       this.stepperView = false;
@@ -114,45 +126,73 @@
         }
       });
     } else if (action === 'update') {
-      this.manageRolesGroups({
-        action, type, value: {
-          name: item.group,
-          roleIds: this.extractIds(this.roles, item.selected_roles),
-          users: item.users || []
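+      // Diff against the snapshot taken at load time to detect users removed from the group.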
+      const currGroupSource = this.startedGroups.filter(cur => cur.group === item.group)[0];
+      const deletedUsers = currGroupSource.users.filter(user => !item.users.includes(user));
+      this.dialog.open(ConfirmationDialogComponent, {
+        data: { notebook: deletedUsers, type: ConfirmationDialogType.deleteUser }, panelClass: 'modal-sm'
+      })
+        .afterClosed().subscribe((res) => {
+        if (!res) {
+          item.users = [...currGroupSource.users];
+          item.selected_roles = [...currGroupSource.selected_roles];
+          item.roles = [...currGroupSource.roles];
+        } else {
+          const isSuperAdminGroup = currGroupSource.roles
+            .some(role => role.description === 'Allow to execute administration operation');
+          const selectedRoles = isSuperAdminGroup ?
+            [...item.selected_roles.map(v => v.role), 'Allow to execute administration operation'] :
+            item.selected_roles.map(v => v.role);
+          this.manageRolesGroups({
+            action, type, value: {
+              name: item.group,
+              roleIds: this.extractIds(this.roles, selectedRoles),
+              users: item.users || []
+            }
+          });
         }
       });
     }
-    this.getEnvironmentHealthStatus();
     this.resetDialog();
   }
 
   public manageRolesGroups($event) {
     switch ($event.action) {
       case 'create':
-        this.rolesService.setupNewGroup($event.value).subscribe(res => {
+        this.rolesService.setupNewGroup($event.value).subscribe(() => {
           this.toastr.success('Group creation success!', 'Created!');
           this.getGroupsData();
         }, () => this.toastr.error('Group creation failed!', 'Oops!'));
         break;
+
       case 'update':
-        this.rolesService.updateGroup($event.value).subscribe(res => {
-          this.toastr.success('Group data successfully updated!', 'Success!');
-          this.getGroupsData();
-        }, () => this.toastr.error('Failed group data updating!', 'Oops!'));
+        this.rolesService.updateGroup($event.value).subscribe(() => {
+          this.toastr.success('Group data was updated successfully!', 'Success!');
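+          // If the group no longer grants admin rights, re-check the session so a demoted user gets redirected.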
+          if (!$event.value.roleIds.includes('admin') && !$event.value.roleIds.includes('projectAdmin')) {
+            this.applicationSecurityService.isLoggedIn().subscribe(() => {
+              this.getEnvironmentHealthStatus();
+            });
+          } else {
+            this.openManageRolesDialog();
+          }
+        }, () => this.toastr.error('Failed to update group data!', 'Oops!'));
+
         break;
+
       case 'delete':
         if ($event.type === 'users') {
-          this.rolesService.removeUsersForGroup($event.value).subscribe(res => {
+          this.rolesService.removeUsersForGroup($event.value).subscribe(() => {
             this.toastr.success('Users was successfully deleted!', 'Success!');
             this.getGroupsData();
           }, () => this.toastr.error('Failed users deleting!', 'Oops!'));
         } else if ($event.type === 'group') {
-          this.rolesService.removeGroupById($event.value).subscribe(res => {
+          this.rolesService.removeGroupById($event.value).subscribe(() => {
             this.toastr.success('Group was successfully deleted!', 'Success!');
             this.getGroupsData();
           }, (error) => this.toastr.error(error.message, 'Oops!'));
         }
         break;
+
       default:
     }
   }
@@ -165,14 +205,23 @@
   }
 
   public updateGroupData(groups) {
-    this.groupsData = groups;
-
+    this.groupsData = groups.map(v => {
+      if (!v.users) {
+        v.users = [];
+      }
+      return v;
+    }).sort((a, b) => (a.group > b.group) ? 1 : ((b.group > a.group) ? -1 : 0));
     this.groupsData.forEach(item => {
-      item.selected_roles = item.roles.map(role => role.description);
+      item.selected_roles = item.roles.map(role => ({role: role.description, type: role.type, cloud: role.cloud}));
     });
+    this.getGroupsListCopy();
   }
 
-  public groupValidarion(): ValidatorFn {
+  private getGroupsListCopy() {
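+    // Deep-copy the loaded groups so later edits can be compared against this snapshot.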
+    this.startedGroups = JSON.parse(JSON.stringify(this.groupsData));
+  }
+
+  public groupValidation(): ValidatorFn {
     const duplicateList: any = this.groupsData.map(item => item.group.toLowerCase());
     return <ValidatorFn>((control: FormControl) => {
       if (control.value && duplicateList.includes(CheckUtils.delimitersFiltering(control.value.toLowerCase()))) {
@@ -186,8 +235,19 @@
     });
   }
 
-  public compareObjects(o1: any, o2: any): boolean {
-    return o1.toLowerCase() === o2.toLowerCase();
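+  // Returns true when the group's users and selected roles match the loaded snapshot, i.e. there is nothing to apply.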
+  public isGroupUnchanged(currGroup) {
+    const currGroupSource = this.startedGroups.filter(cur => cur.group === currGroup.group)[0];
+    return JSON.stringify(currGroup.users) === JSON.stringify(currGroupSource.users) &&
+      JSON.stringify(currGroup.selected_roles.map(role => role.role).sort()) ===
+      JSON.stringify(currGroupSource.selected_roles.map(role => role.role).sort());
   }
 
   public resetDialog() {
@@ -211,7 +271,23 @@
 
   private getEnvironmentHealthStatus() {
     this.healthStatusService.getEnvironmentHealthStatus()
-      .subscribe((result: any) => this.healthStatus = result);
+      .subscribe((result: any) => {
+        this.healthStatus = result;
+        if (!this.healthStatus.admin && !this.healthStatus.projectAdmin) {
+          this.appRoutingService.redirectToHomePage();
+        } else {
+          this.openManageRolesDialog();
+        }
+      });
+  }
+
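+  // $event.type carries the group name when the change comes from a group row; it is absent for the creation stepper.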
+  public onUpdate($event): void {
+    if ($event.type) {
+      this.groupsData.filter(group => group.group === $event.type)[0].selected_roles = $event.model;
+    } else {
+      this.setupRoles = $event.model;
+    }
   }
 }
 
@@ -224,9 +300,9 @@
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </div>
   <div mat-dialog-content class="content">
-    <p *ngIf="data.user">User <strong>{{ data.user }}</strong> will be deleted from <strong>{{ data.group }}</strong> group.</p>
-    <p *ngIf="data.id">Group <strong class="ellipsis group-name">{{ data.group }}</strong> will be decommissioned.</p>
-    <p class="m-top-20"><strong>Do you want to proceed?</strong></p>
+    <p *ngIf="data.user">User <span class="strong">{{ data.user }}</span> will be deleted from <span class="strong">{{ data.group }}</span> group.</p>
+    <p *ngIf="data.id">Group <span class="ellipsis group-name strong">{{ data.group }}</span> will be decommissioned.</p>
+    <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
   </div>
   <div class="text-center">
     <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
@@ -235,6 +311,7 @@
   `,
   styles: [`.group-name { max-width: 96%; display: inline-block; vertical-align: bottom; }`]
 })
 export class ConfirmDeleteUserAccountDialogComponent {
   constructor(
     public dialogRef: MatDialogRef<ConfirmDeleteUserAccountDialogComponent>,
diff --git a/services/self-service/src/main/resources/webapp/src/app/app.module.ts b/services/self-service/src/main/resources/webapp/src/app/app.module.ts
index 680ae7c..e23e14a 100644
--- a/services/self-service/src/main/resources/webapp/src/app/app.module.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/app.module.ts
@@ -40,6 +40,7 @@
 import { AdministrationModule } from './administration/administration.module';
 import { WebterminalModule } from './webterminal';
 import { CoreModule } from './core/core.module';
+import { SwaggerAPIModule } from './swagger';
 
 @NgModule({
   declarations: [AppComponent],
@@ -57,6 +58,7 @@
     ReportingModule,
     AdministrationModule,
     WebterminalModule,
+    SwaggerAPIModule,
     RouterModule,
     AppRoutingModule,
     CoreModule.forRoot(),
diff --git a/services/self-service/src/main/resources/webapp/src/app/app.routing.module.ts b/services/self-service/src/main/resources/webapp/src/app/app.routing.module.ts
index 0473332..f2649fb 100644
--- a/services/self-service/src/main/resources/webapp/src/app/app.routing.module.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/app.routing.module.ts
@@ -21,7 +21,7 @@
 import { Routes, RouterModule } from '@angular/router';
 
 import { LoginComponent } from './login/login.module';
-import { LayoutComponent } from './layout/layout.component'
+import { LayoutComponent } from './layout/layout.component';
 import { ResourcesComponent } from './resources/resources.component';
 import { AccessNotebookGuideComponent, PublicKeyGuideComponent } from './help';
 import { NotFoundComponent } from './service-pages/not-found/not-found.component';
@@ -31,6 +31,7 @@
 import { ManagementComponent } from './administration/management/management.component';
 import { ProjectComponent } from './administration/project/project.component';
 import { RolesComponent } from './administration/roles/roles.component';
+import { SwaggerComponent } from './swagger/swagger.component';
 
 import { AuthorizationGuard, CheckParamsGuard, CloudProviderGuard, AdminGuard } from './core/services';
 
@@ -67,6 +68,10 @@
       component: ManagementComponent,
       canActivate: [AuthorizationGuard, AdminGuard]
     }, {
+    //   path: 'swagger',
+    //   component: SwaggerComponent,
+    //   canActivate: [AuthorizationGuard]
+    // }, {
       path: 'help/publickeyguide',
       component: PublicKeyGuideComponent,
       canActivate: [AuthorizationGuard]
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/directives/scrollTo.directive.ts b/services/self-service/src/main/resources/webapp/src/app/core/directives/scrollTo.directive.ts
index 08560b2..fb6e0c3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/directives/scrollTo.directive.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/directives/scrollTo.directive.ts
@@ -24,7 +24,7 @@
   selector: '[scrollTo]'
 })
 export class ScrollDirective {
-  constructor(private elRef: ElementRef) {}
+  constructor(private elRef: ElementRef) { }
 
   @Input()
   set scrollTo(condition) {
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
index cffcf03..75d2087 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
@@ -47,6 +47,7 @@
   private static readonly IMAGE = 'image';
   private static readonly SCHEDULER = 'scheduler';
   private static readonly TEMPLATES = 'templates';
+  private static readonly COMPUTATION_TEMPLATES = 'computation_templates';
   private static readonly COMPUTATIONAL_RESOURCES_TEMLATES = 'computational_templates';
   private static readonly COMPUTATIONAL_RESOURCES = 'computational_resources';
   private static readonly COMPUTATIONAL_RESOURCES_DATAENGINE = 'computational_resources_dataengine';
@@ -74,8 +75,8 @@
   private static readonly DOWNLOAD_REPORT = 'download_report';
   private static readonly SETTINGS = 'settings';
   private static readonly PROJECT = 'project';
-  private static readonly USER_PROJECT = 'user_project';
   private static readonly ENDPOINT = 'endpoint';
+  private static readonly ENDPOINT_CONNECTION = 'endpoint_connection';
 
   private requestRegistry: Dictionary<string>;
 
@@ -181,6 +182,12 @@
       null);
   }
 
+  public buildGetComputationTemplatesRequest(params, provider): Observable<any> {
+    return this.buildRequest(HTTPMethod.GET,
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATION_TEMPLATES) + params,
+      null);
+  }
+
   public buildCreateExploratoryEnvironmentRequest(data): Observable<any> {
     return this.buildRequest(HTTPMethod.PUT,
       this.requestRegistry.Item(ApplicationServiceFacade.EXPLORATORY_ENVIRONMENT),
@@ -188,6 +195,13 @@
       { responseType: 'text', observe: 'response' });
   }
 
+  public buildGetExploratoryEnvironmentRequest(): Observable<any> {
+    return this.buildRequest(HTTPMethod.GET,
+      this.requestRegistry.Item(ApplicationServiceFacade.EXPLORATORY_ENVIRONMENT),
+      null,
+      { observe: 'response' });
+  }
+
   public buildRunExploratoryEnvironmentRequest(data): Observable<any> {
     return this.buildRequest(HTTPMethod.POST,
       this.requestRegistry.Item(ApplicationServiceFacade.EXPLORATORY_ENVIRONMENT),
@@ -201,35 +215,35 @@
       data, { responseType: 'text', observe: 'response' });
   }
 
-  public buildCreateComputationalResources_DataengineServiceRequest(data): Observable<any> {
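+  // Computational-resource endpoints are now prefixed with the cloud provider, e.g. '/api/aws' + registry path.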
+  public buildCreateComputationalResources_DataengineServiceRequest(data, provider): Observable<any> {
     return this.buildRequest(HTTPMethod.PUT,
-      this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES_DATAENGINESERVICE),
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES_DATAENGINESERVICE),
       data,
       { observe: 'response' });
   }
 
-  public buildCreateComputationalResources_DataengineRequest(data): Observable<any> {
+  public buildCreateComputationalResources_DataengineRequest(data, provider): Observable<any> {
     return this.buildRequest(HTTPMethod.PUT,
-      this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES_DATAENGINE),
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES_DATAENGINE),
       data,
       { observe: 'response' });
   }
 
-  public buildDeleteComputationalResourcesRequest(data): Observable<any> {
+  public buildDeleteComputationalResourcesRequest(data, provider): Observable<any> {
     return this.buildRequest(HTTPMethod.DELETE,
-      this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES),
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES),
       data);
   }
 
-  public buildStopSparkClusterAction(data): Observable<any> {
+  public buildStopSparkClusterAction(data, provider): Observable<any> {
     return this.buildRequest(HTTPMethod.DELETE,
-      this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES),
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES),
       data);
   }
 
-  public buildStartSparkClusterAction(params): Observable<any> {
+  public buildStartSparkClusterAction(params, provider): Observable<any> {
     return this.buildRequest(HTTPMethod.PUT,
-      this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES) + params,
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES) + params,
       null);
   }
 
@@ -347,6 +361,7 @@
       data,
       { responseType: 'text', observe: 'response' });
   }
+
   public buildGetBackupStatusRequest(uuid): Observable<any> {
     return this.buildRequest(HTTPMethod.GET,
       this.requestRegistry.Item(ApplicationServiceFacade.BACKUP),
@@ -359,9 +374,9 @@
       image);
   }
 
-  public buildGetImagesList(): Observable<any> {
+  public buildGetImagesList(param): Observable<any> {
     return this.buildRequest(HTTPMethod.GET,
-      this.requestRegistry.Item(ApplicationServiceFacade.IMAGE),
+      this.requestRegistry.Item(ApplicationServiceFacade.IMAGE) + param,
       null);
   }
 
@@ -403,16 +418,6 @@
       null);
   }
 
-  public buildManageEnvironment(action, data): Observable<any> {
-    return this.buildRequest(HTTPMethod.POST,
-      this.requestRegistry.Item(ApplicationServiceFacade.ENV) + action,
-      data,
-      {
-        observe: 'response',
-        headers: { 'Content-Type': 'text/plain' }
-      });
-  }
-
   public buildGetAllEnvironmentData(): Observable<any> {
     return this.buildRequest(HTTPMethod.GET,
       this.requestRegistry.Item(ApplicationServiceFacade.FULL_ACTIVE_LIST),
@@ -497,15 +502,15 @@
       data);
   }
 
-  public buildGetClusterConfiguration(param): Observable<any> {
+  public buildGetClusterConfiguration(param, provider): Observable<any> {
     return this.buildRequest(HTTPMethod.GET,
-      this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES) + param,
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES) + param,
       null);
   }
 
-  public buildEditClusterConfiguration(param, data): Observable<any> {
+  public buildEditClusterConfiguration(param, data, provider): Observable<any> {
     return this.buildRequest(HTTPMethod.PUT,
-      this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES) + param,
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES) + param,
       data);
   }
 
@@ -547,13 +552,7 @@
 
   public buildGetUserProjectsList(params?): Observable<any> {
     return this.buildRequest(HTTPMethod.GET,
-      this.requestRegistry.Item(ApplicationServiceFacade.USER_PROJECT) + params,
-      null);
-  }
-
-  public buildDeleteProject(param): Observable<any> {
-    return this.buildRequest(HTTPMethod.DELETE,
-      this.requestRegistry.Item(ApplicationServiceFacade.PROJECT) + param,
+      this.requestRegistry.Item(ApplicationServiceFacade.PROJECT) + params,
       null);
   }
 
@@ -576,6 +575,12 @@
       null);
   }
 
+  public getEndpointsResource(endpoint): Observable<any> {
+    return this.buildRequest(HTTPMethod.GET,
+      this.requestRegistry.Item(ApplicationServiceFacade.ENDPOINT) + `/${endpoint}/resources`,
+      null);
+  }
+
   public buildCreateEndpoint(data): Observable<any> {
     return this.buildRequest(HTTPMethod.POST,
       this.requestRegistry.Item(ApplicationServiceFacade.ENDPOINT),
@@ -588,6 +593,12 @@
       null);
   }
 
+  public getEndpointConnectionStatus(endpointUrl): Observable<any> {
+    return this.buildRequest(HTTPMethod.GET,
+      this.requestRegistry.Item(ApplicationServiceFacade.ENDPOINT_CONNECTION) + endpointUrl,
+      null);
+  }
+
   private setupRegistry(): void {
     this.requestRegistry = new Dictionary<string>();
 
@@ -611,6 +622,8 @@
       '/api/infrastructure_provision/exploratory_environment');
     this.requestRegistry.Add(ApplicationServiceFacade.TEMPLATES,
       '/api/infrastructure_templates');
+    this.requestRegistry.Add(ApplicationServiceFacade.COMPUTATION_TEMPLATES,
+      '/infrastructure_provision/computational_resources');
     this.requestRegistry.Add(ApplicationServiceFacade.IMAGE,
       '/api/infrastructure_provision/exploratory_environment/image');
     this.requestRegistry.Add(ApplicationServiceFacade.SCHEDULER,
@@ -619,11 +632,11 @@
 
     // Computational Resources
     this.requestRegistry.Add(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES,
-      '/api/infrastructure_provision/computational_resources');
+      '/infrastructure_provision/computational_resources');
     this.requestRegistry.Add(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES_DATAENGINESERVICE,
-      '/api/infrastructure_provision/computational_resources/dataengine-service'); // emr(aws)
+      '/infrastructure_provision/computational_resources/dataengine-service'); // emr(aws)
     this.requestRegistry.Add(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES_DATAENGINE,
-      '/api/infrastructure_provision/computational_resources/dataengine'); // spark (azure|aws)
+      '/infrastructure_provision/computational_resources/dataengine'); // spark (azure|aws)
 
     this.requestRegistry.Add(ApplicationServiceFacade.COMPUTATIONAL_RESOURCES_TEMLATES,
       '/api/infrastructure_templates/computational_templates');
@@ -664,7 +677,7 @@
     // project
     this.requestRegistry.Add(ApplicationServiceFacade.PROJECT, '/api/project');
     this.requestRegistry.Add(ApplicationServiceFacade.ENDPOINT, '/api/endpoint');
-    this.requestRegistry.Add(ApplicationServiceFacade.USER_PROJECT, '/api/project/me');
+    this.requestRegistry.Add(ApplicationServiceFacade.ENDPOINT_CONNECTION, '/api/endpoint/url/');
   }
 
   private buildRequest(method: HTTPMethod, url_path: string, body: any, opt?) {
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
index 5dcef3e..5d35eec 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
@@ -28,26 +28,26 @@
 export class DataengineConfigurationService {
   constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
 
-  public getClusterConfiguration(exploratory, cluster): Observable<{}> {
-    const url = `/${exploratory}/${cluster}/config`;
+  public getClusterConfiguration(project, exploratory, cluster, provider): Observable<{}> {
+    const url = `/${project}/${exploratory}/${cluster}/config`;
     return this.applicationServiceFacade
-      .buildGetClusterConfiguration(url)
+      .buildGetClusterConfiguration(url, provider)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public editClusterConfiguration(data, exploratory, cluster): Observable<{}> {
-    const url = `/dataengine/${exploratory}/${cluster}/config`;
+  public editClusterConfiguration(data, project, exploratory, cluster, provider): Observable<{}> {
+    const url = `/dataengine/${project}/${exploratory}/${cluster}/config`;
     return this.applicationServiceFacade
-      .buildEditClusterConfiguration(url, data)
+      .buildEditClusterConfiguration(url, data, provider)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getExploratorySparkConfiguration(exploratory): Observable<{}> {
-    const url = `/${exploratory}/cluster/config`;
+  public getExploratorySparkConfiguration(project, exploratory): Observable<{}> {
+    const url = `/${project}/${exploratory}/cluster/config`;
     return this.applicationServiceFacade
       .buildGetExploratorySparkConfiguration(url)
       .pipe(
@@ -55,8 +55,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public editExploratorySparkConfiguration(data, exploratory): Observable<{}> {
-    const url = `/${exploratory}/reconfigure`;
+  public editExploratorySparkConfiguration(data, project, exploratory): Observable<{}> {
+    const url = `/${project}/${exploratory}/reconfigure`;
     return this.applicationServiceFacade
       .buildEditExploratorySparkConfiguration(url, data)
       .pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/endpoint.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/endpoint.service.ts
index 98801b4..ae522c5 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/endpoint.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/endpoint.service.ts
@@ -47,6 +47,14 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
+  public getEndpointsResource(endpoint): Observable<any> {
+    return this.applicationServiceFacade
+      .getEndpointsResource(endpoint)
+      .pipe(
+        map(response => response),
+        catchError(ErrorUtils.handleServiceError));
+  }
+
   public deleteEndpoint(data): Observable<any> {
     const url = `/${data}`;
     return this.applicationServiceFacade
@@ -55,5 +63,13 @@
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
   }
+
+  public getEndpoinConnectionStatus(endpointUrl): Observable<{}> {
+    return this.applicationServiceFacade
+      .getEndpointConnectionStatus(endpointUrl)
+      .pipe(
+        map(response => response),
+        catchError(ErrorUtils.handleServiceError));
+  }
 }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts
index d593d08..12086bc 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts
@@ -119,7 +119,7 @@
               this.appRoutingService.redirectToHomePage();
               return false;
             }
-            if (parameter === 'administration' && !data.admin) {
+            if (parameter === 'administration' && !data.admin && !data.projectAdmin) {
               this.appRoutingService.redirectToNoAccessPage();
               return false;
             }
@@ -136,15 +136,6 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public manageEnvironment(act, data): Observable<Response | {}> {
-    const action = `/${act}`;
-    return this.applicationServiceFacade
-      .buildManageEnvironment(action, data)
-      .pipe(
-        map(response => response),
-        catchError(ErrorUtils.handleServiceError));
-  }
-
   public getSsnMonitorData(): Observable<{}> {
     return this.applicationServiceFacade
       .buildGetSsnMonitorData()
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
index d4f8942..2119b1a 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
@@ -28,8 +28,8 @@
 export class LibrariesInstallationService {
   constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
 
-  public getGroupsList(exploratory, computational?): Observable<Response> {
-    let body = `?exploratory_name=${exploratory}`;
+  public getGroupsList(project, exploratory, computational?): Observable<Response> {
+    let body = `?project_name=${project}&exploratory_name=${exploratory}`;
     if (computational) body += `&computational_name=${computational}`;
 
     return this.applicationServiceFacade
@@ -65,8 +65,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getInstalledLibrariesList(exploratory): Observable<{}> {
-    const body = `?exploratory_name=${exploratory}`;
+  public getInstalledLibrariesList(project, exploratory): Observable<{}> {
+    const body = `?project_name=${project}&exploratory_name=${exploratory}`;
 
     return this.applicationServiceFacade
       .buildGetInstalledLibrariesList(body)
@@ -75,8 +75,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getInstalledLibsByResource(exploratory, computational?): Observable<{}> {
-    let body = `?exploratory_name=${exploratory}`;
+  public getInstalledLibsByResource(project, exploratory, computational?): Observable<{}> {
+    let body = `?project_name=${project}&exploratory_name=${exploratory}`;
     if (computational) body += `&computational_name=${computational}`;
 
     return this.applicationServiceFacade
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
index 4b759b6..2d21fd5 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
@@ -36,8 +36,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  environmentManagement(data, action: string, resource: string, computational?: string): Observable<{}> {
-    const params = computational ? `/${action}/${resource}/${computational}` : `/${action}/${resource}`;
+  environmentManagement(data, action: string, project: string, resource: string, computational?: string): Observable<{}> {
+    const params = computational ? `/${action}/${project}/${resource}/${computational}` : `/${action}/${project}/${resource}`;
     return this.applicationServiceFacade
       .buildEnvironmentManagement(params, data)
       .pipe(
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java b/services/self-service/src/main/resources/webapp/src/app/core/services/progress-bar.service.ts
similarity index 69%
copy from integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java
copy to services/self-service/src/main/resources/webapp/src/app/core/services/progress-bar.service.ts
index 4b70836..7e0dabb 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/progress-bar.service.ts
@@ -17,13 +17,22 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.exceptions;
+import { Injectable } from '@angular/core';
+import { Subject } from 'rxjs';
 
-public class JenkinsException extends RuntimeException {
+@Injectable({
+  providedIn: 'root'
+})
+export class ProgressBarService {
+  public showProgressBar = new Subject();
 
-	private static final long serialVersionUID = 1L;
+  constructor() { }
 
-	public JenkinsException(String message) {
-		super(message);
-	}
+  public stopProgressBar() {
+    this.showProgressBar.next(false);
+  }
+
+  public startProgressBar() {
+    this.showProgressBar.next(true);
+  }
 }
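
ProgressBarService broadcasts visibility through a bare Subject, so consumers only see emissions that happen while they are subscribed; a BehaviorSubject<boolean> would also replay the current state to late subscribers. A minimal consumer sketch (hypothetical component, not part of this patch):

    import { Component, OnDestroy } from '@angular/core';
    import { Subscription } from 'rxjs';
    import { ProgressBarService } from './progress-bar.service';

    @Component({
      selector: 'dlab-progress-demo',
      template: `<div class="progress" *ngIf="visible">Loading...</div>`
    })
    export class ProgressDemoComponent implements OnDestroy {
      visible = false;
      private sub: Subscription;

      constructor(progressBar: ProgressBarService) {
        // The Subject is untyped, so coerce the emission to boolean.
        this.sub = progressBar.showProgressBar.subscribe(show => this.visible = !!show);
      }

      ngOnDestroy() { this.sub.unsubscribe(); }  // avoid a leaked subscription
    }
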
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
index 0d03ec3..ccf93f8 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
@@ -53,7 +53,7 @@
   }
 
   public getUserProjectsList(isActive?): Observable<{}> {
-    const params = isActive ? '?active=true' : '';
+    const params = isActive ? '/me?active=true' : '';
     return this.applicationServiceFacade
       .buildGetUserProjectsList(params)
       .pipe(
@@ -61,15 +61,6 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public deleteProject(data): Observable<{}> {
-    const url = `/${data}`;
-    return this.applicationServiceFacade
-      .buildDeleteProject(url)
-      .pipe(
-        map(response => response),
-        catchError(ErrorUtils.handleServiceError));
-  }
-
   public toggleProjectStatus(data, action): Observable<{}> {
     const url = `/${action}`;
     return this.applicationServiceFacade
@@ -79,15 +70,6 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public stopProjectAction(data): Observable<{}> {
-    const url = `/managing/stop/${data}`;
-    return this.applicationServiceFacade
-      .buildToggleProjectStatus(url, data)
-      .pipe(
-        map(response => response),
-        catchError(ErrorUtils.handleServiceError));
-  }
-
   public updateProjectsBudget(data): Observable<{}> {
     const url = '/budget';
     return this.applicationServiceFacade
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
index c595486..a854305 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
@@ -29,8 +29,8 @@
 export class SchedulerService {
   constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
 
-  public getExploratorySchedule(notebook, resource?): Observable<{}> {
-    const param = resource ? `/${notebook}/${resource}` : `/${notebook}`;
+  public getExploratorySchedule(project, notebook, resource?): Observable<{}> {
+    const param = resource ? `/${project}/${notebook}/${resource}` : `/${project}/${notebook}`;
     return this.applicationServiceFacade
       .buildGetExploratorySchedule(param)
       .pipe(
@@ -38,8 +38,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public setExploratorySchedule(notebook, data, resource?): Observable<ScheduleSchema> {
-    const param = resource ? `/${notebook}/${resource}` : `/${notebook}`;
+  public setExploratorySchedule(project, notebook, data, resource?): Observable<ScheduleSchema> {
+    const param = resource ? `/${project}/${notebook}/${resource}` : `/${project}/${notebook}`;
     return this.applicationServiceFacade
       .buildSetExploratorySchedule(param, data)
       .pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
index 224ba9c..6f7c254 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
@@ -37,10 +37,10 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getComputationalTemplates(project, endpoint): Observable<any> {
-    const url = `/${project}/${endpoint}/computational_templates`;
+  public getComputationalTemplates(project, endpoint, provider): Observable<any> {
+    const url = `/${project}/${endpoint}/templates`;
     return this.applicationServiceFacade
-      .buildGetTemplatesRequest(url)
+      .buildGetComputationTemplatesRequest(url, provider)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
@@ -72,8 +72,16 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
+  public getProjectByExploratoryEnvironment(): Observable<{}> {
+    return this.applicationServiceFacade
+      .buildGetExploratoryEnvironmentRequest()
+      .pipe(
+        map(response => response.body.project_exploratories),
+        catchError(ErrorUtils.handleServiceError));
+  }
+
   public suspendExploratoryEnvironment(notebook: any, action): Observable<{}> {
-    const url = '/' + notebook.name + '/' + action;
+    const url = '/' + notebook.project + '/' + notebook.name + '/' + action;
 
     return this.applicationServiceFacade
       .buildSuspendExploratoryEnvironmentRequest(JSON.stringify(url))
@@ -82,44 +90,44 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public createComputationalResource_DataengineService(data): Observable<{}> {
+  public createComputationalResource_DataengineService(data, provider): Observable<{}> {
     const body = JSON.stringify(data);
     return this.applicationServiceFacade
-      .buildCreateComputationalResources_DataengineServiceRequest(body)
+      .buildCreateComputationalResources_DataengineServiceRequest(body, provider)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public createComputationalResource_Dataengine(data): Observable<{}> {
+  public createComputationalResource_Dataengine(data, provider): Observable<{}> {
     const body = JSON.stringify(data);
     return this.applicationServiceFacade
-      .buildCreateComputationalResources_DataengineRequest(body)
+      .buildCreateComputationalResources_DataengineRequest(body, provider)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public suspendComputationalResource(notebookName: string, computationalResourceName: string): Observable<{}> {
-    const body = JSON.stringify('/' + notebookName + '/' + computationalResourceName + '/terminate');
+  public suspendComputationalResource(projectName: string, notebookName: string, computationalResourceName: string, provider: string): Observable<{}> {
+    const body = JSON.stringify('/' + projectName + '/' + notebookName + '/' + computationalResourceName + '/terminate');
     return this.applicationServiceFacade
-      .buildDeleteComputationalResourcesRequest(body)
+      .buildDeleteComputationalResourcesRequest(body, provider)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public toggleStopStartAction(project: string, notebook: string, resource: string, action): Observable<{}> {
+  public toggleStopStartAction(project: string, notebook: string, resource: string, action, provider: string): Observable<{}> {
     const url = `/${project}/${notebook}/${resource}/${action}`;
     if (action === 'stop') {
       return this.applicationServiceFacade
-        .buildStopSparkClusterAction(JSON.stringify(url))
+        .buildStopSparkClusterAction(JSON.stringify(url), provider)
         .pipe(
           map(response => response),
           catchError(ErrorUtils.handleServiceError));
     } else if (action === 'start') {
       return this.applicationServiceFacade
-        .buildStartSparkClusterAction(url)
+        .buildStartSparkClusterAction(url, provider)
         .pipe(
           map(response => response),
           catchError(ErrorUtils.handleServiceError));
@@ -152,9 +160,10 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getImagesList(): Observable<{}> {
+  public getImagesList(project?): Observable<{}> {
+    const body = project ? `/all?project=${project}` : '';
     return this.applicationServiceFacade
-      .buildGetImagesList()
+      .buildGetImagesList(body)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/util/checkUtils.ts b/services/self-service/src/main/resources/webapp/src/app/core/util/checkUtils.ts
index 994d18a..4cd39c3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/util/checkUtils.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/util/checkUtils.ts
@@ -40,6 +40,14 @@
     return true;
   }
 
+  public static numberOnly(event): boolean {
+    const charCode = (event.which) ? event.which : event.keyCode;
+    if (charCode > 31 && (charCode < 48 || charCode > 57)) {
+      return false;
+    }
+    return true;
+  }
+
   public static delimitersFiltering(resource): string {
     return resource.replace(RegExp(PATTERNS.delimitersRegex, 'g'), '').toString().toLowerCase();
   }
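
numberOnly() is a keypress guard: control codes (charCode <= 31) pass through and anything outside the ASCII digit range 48-57 is rejected. Since Angular calls preventDefault() when a DOM event handler expression returns false, exposing the static helper on a component is enough to make an input digits-only. A sketch under that assumption (component name and import path are illustrative):

    import { Component } from '@angular/core';
    import { CheckUtils } from '../core/util/checkUtils';

    @Component({
      selector: 'dlab-digits-input',
      template: `<input type="text" (keypress)="numberOnly($event)" />`
    })
    export class DigitsInputComponent {
      // Re-expose the static guard so the template can bind to it.
      numberOnly = CheckUtils.numberOnly;
    }
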
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/util/patterns.ts b/services/self-service/src/main/resources/webapp/src/app/core/util/patterns.ts
index 52e2670..ac9137f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/util/patterns.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/util/patterns.ts
@@ -23,5 +23,6 @@
   delimitersRegex: '[-_]?',
   url: '[a-zA-Z0-9.://%#&\\.@:%-_\+~#=]*\.[^\s]*[a-zA-Z0-9]/+',
   nodeCountPattern: '^[1-9]\\d*$',
-  integerRegex: '^[0-9]*$'
-}
+  integerRegex: '^[0-9]*$',
+  fullUrl: /^(http?|ftp|https):\/\/([a-zA-Z0-9.-]+(:[a-zA-Z0-9.&%$-]+)*@)*((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])){3}|([a-zA-Z0-9-]+\.)*[a-zA-Z0-9-]+([.:])(\d{4}|com|edu|gov|int|mil|net|org|biz|arpa|info|name|pro|aero|coop|museum|[a-zA-Z]{2}))(:[0-9]+)*(\/($|[a-zA-Z0-9.,?'\\+&%$#=~_-]+))*\/$/
+};
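
The new fullUrl pattern accepts a scheme, optional credentials, an IPv4 address or a hostname ending in a recognized TLD (or a 4-digit segment), an optional port, and a path, and it is anchored on a trailing slash. A quick sanity check with made-up URLs, assuming PATTERNS is the exported constant:

    import { PATTERNS } from './patterns';

    // fullUrl is a RegExp literal, so .test() can be called directly.
    console.log(PATTERNS.fullUrl.test('https://dlab.example.com:8084/'));  // true
    console.log(PATTERNS.fullUrl.test('https://dlab.example.com:8084'));   // false: no trailing '/'
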
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/util/sortUtils.ts b/services/self-service/src/main/resources/webapp/src/app/core/util/sortUtils.ts
index 88c5a22..0a613ad 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/util/sortUtils.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/util/sortUtils.ts
@@ -24,6 +24,10 @@
     return order.indexOf(arg1) - order.indexOf(arg2);
   }
 
+  public static activeStatuses(): string[] {
+    return ['running', 'stopping', 'stopped', 'creating', 'configuring', 'reconfiguring', 'starting', 'creating image'];
+  }
+
   public static shapesSort(shapesJson) {
     const sortOrder = ['For testing', 'Memory optimized', 'GPU optimized', 'Compute optimized'];
     const sortedShapes = {};
@@ -40,4 +44,9 @@
 
     return groups.sort((arg1, arg2) => sortOrder.indexOf(arg1) - sortOrder.indexOf(arg2));
   }
+
+  public static flatDeep(arr, d = 1) {
+    return d > 0 ? arr.reduce((acc, val) => acc.concat(Array.isArray(val) ? this.flatDeep(val, d - 1) : val), [])
+      : arr.slice();
+  }
 }
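
activeStatuses() centralizes the set of non-terminal resource states, and flatDeep() is a depth-limited flatten (a hand-rolled Array.prototype.flat). Illustrative calls, assuming the class is exported as SortUtils:

    import { SortUtils } from './sortUtils';

    SortUtils.activeStatuses().includes('terminated');               // false
    SortUtils.flatDeep([['running', ['stopped']], 'creating'], 2);   // ['running', 'stopped', 'creating']
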
diff --git a/services/self-service/src/main/resources/webapp/src/app/login/login.component.html b/services/self-service/src/main/resources/webapp/src/app/login/login.component.html
index 2266df3..d87e1d1 100644
--- a/services/self-service/src/main/resources/webapp/src/app/login/login.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/login/login.component.html
@@ -23,7 +23,7 @@
       <img src="assets/img/logo.png" alt="">
     </a>
     <form name="form" class="form-wrap" #f="ngForm" novalidate>
-      <!-- 
+      <!--
       <div class="input-icon-group">
         <span class="input-icon"><span class="login-icon icon-username"></span></span>
         <input type="text" class="input-with-icon" name="username" [(ngModel)]="model.username" [pattern]="userPattern" #username="ngModel" placeholder="Username" required />
@@ -34,8 +34,8 @@
         <input type="password" class="input-with-icon" placeholder="Password" name="password" [(ngModel)]="model.password" #password="ngModel" required />
       </div>
 
-      
-      <div class="form-group" [ngClass]="{'flex': DICTIONARY.cloud_provider === 'azure'}">
+
+      <div class="form-group" [ngClass]="{'flex': DICTIONARY['aws'].cloud_provider === 'azure'}">
         <button mat-raised-button [disabled]="loading || !username.valid || !password.valid"
                 (click)="login_btnClick();"
                 class="butt butt-login"
@@ -50,7 +50,7 @@
               </span>
             </span>
         </button>
-        <button *ngIf="DICTIONARY.cloud_provider === 'azure' && !DICTIONARY.use_ldap"
+        <button *ngIf="DICTIONARY['aws'].cloud_provider === 'azure' && !DICTIONARY['aws'].use_ldap"
                 class="butt butt-azure"
                 mat-raised-button (click)="loginWithAzure_btnClick();">
           Login with Azure
diff --git a/services/self-service/src/main/resources/webapp/src/app/login/login.component.ts b/services/self-service/src/main/resources/webapp/src/app/login/login.component.ts
index 64314c6..bc50072 100644
--- a/services/self-service/src/main/resources/webapp/src/app/login/login.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/login/login.component.ts
@@ -22,7 +22,6 @@
 
 import { LoginModel } from './login.model';
 import { AppRoutingService, HealthStatusService, ApplicationSecurityService } from '../core/services';
-import { HTTP_STATUS_CODES } from '../core/util';
 import { DICTIONARY } from '../../dictionary/global.dictionary';
 
 @Component({
@@ -69,12 +68,8 @@
 
         return false;
       }, error => {
-        if (DICTIONARY.cloud_provider === 'azure' && error && error.status === HTTP_STATUS_CODES.FORBIDDEN) {
-          window.location.href = error.headers.get('Location');
-        } else {
           this.error = error.message;
           this.loading = false;
-        }
       });
 
     return false;
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
index c6f6020..537cbb5 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
@@ -20,9 +20,9 @@
 <section class="table-wrapper">
   <table mat-table [dataSource]="reportData" class="data-grid reporting mat-elevation-z6">
 
-    <ng-container matColumnDef="name" sticky>
-      <th mat-header-cell *matHeaderCellDef class="env_name">
-        <span class="label"> Environment name </span>
+    <ng-container matColumnDef="name">
+      <th mat-header-cell *matHeaderCellDef class="env_name label-header">
+        <div class="label"><span class="text"> Environment name</span></div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filteredReportData.dlab_id.length > 0; else dlab_id_filtered">filter_list</span>
@@ -30,41 +30,51 @@
           </i>
         </button>
       </th>
-      <td mat-cell *matCellDef="let element"> {{element[DICTIONARY.billing.dlabId]}} </td>
-      <td mat-footer-cell *matFooterCellDef> Total </td>
+      <td mat-cell *matCellDef="let element"><span class="table-item">{{element.dlabId}}</span></td>
+      <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
     </ng-container>
 
     <ng-container matColumnDef="user">
-      <th mat-header-cell *matHeaderCellDef class="th_user">
-        <span class="label"> User </span>
+      <th mat-header-cell *matHeaderCellDef class="th_user label-header">
+        <div class="sort">
+          <div class="sort-arrow up" (click)="sortBy('user', 'down')" [ngClass]="{'active': !!this.active['userdown']}"></div>
+          <div class="sort-arrow down" (click)="sortBy('user', 'up')" [ngClass]="{'active': !!this.active['userup']}"></div>
+        </div>
+        <div class="label">
+          <span class="text"> User </span>
+        </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
-            <span *ngIf="filteredReportData.user.length > 0; else user_filtered">filter_list</span>
+            <span *ngIf="filteredReportData.users.length > 0; else user_filtered">filter_list</span>
             <ng-template #user_filtered>more_vert</ng-template>
           </i>
         </button>
       </th>
       <td mat-cell *matCellDef=" let element"> {{element.user}} </td>
-      <td mat-footer-cell *matFooterCellDef></td>
+      <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
     </ng-container>
 
     <ng-container matColumnDef="project">
-      <th mat-header-cell *matHeaderCellDef class="th_project">
-        <span class="label">Project</span>
+      <th mat-header-cell *matHeaderCellDef class="th_project label-header">
+        <div class="sort">
+          <div class="sort-arrow up" (click)="sortBy('project', 'down')" [ngClass]="{'active': !!this.active['projectdown']}"></div>
+          <div class="sort-arrow down" (click)="sortBy('project', 'up')" [ngClass]="{'active': !!this.active['projectup']}"></div>
+        </div>
+        <div class="label"><span class="text">Project</span></div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
-            <span *ngIf="filteredReportData.project.length > 0; else project_filtered">filter_list</span>
+            <span *ngIf="filteredReportData.projects.length > 0; else project_filtered">filter_list</span>
             <ng-template #project_filtered>more_vert</ng-template>
           </i>
         </button>
       </th>
       <td mat-cell *matCellDef="let element"> {{element.project}} </td>
-      <td mat-footer-cell *matFooterCellDef></td>
+      <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
     </ng-container>
 
     <ng-container matColumnDef="type">
-      <th mat-header-cell *matHeaderCellDef class="th_type">
-        <span class="label"> Resource Type </span>
+      <th mat-header-cell *matHeaderCellDef class="th_type label-header">
+        <div class="label"><span class="text"> Resource Type</span> </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filteredReportData.resource_type.length > 0; else type_filtered">filter_list</span>
@@ -72,16 +82,16 @@
           </i>
         </button>
       </th>
-      <td mat-cell *matCellDef="let element"> {{element[DICTIONARY.billing.resourceType]}} </td>
-      <td mat-footer-cell *matFooterCellDef></td>
+      <td mat-cell *matCellDef="let element"> {{element.resource_type | titlecase}} </td>
+      <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
     </ng-container>
 
     <ng-container matColumnDef="status">
-      <th mat-header-cell *matHeaderCellDef class="th_status">
-        <span class="label"> Status </span>
+      <th mat-header-cell *matHeaderCellDef class="th_status label-header">
+        <div class="label"><span class="text"> Status</span> </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
-            <span *ngIf="filteredReportData.status.length > 0; else status_filtered">filter_list</span>
+            <span *ngIf="filteredReportData.statuses.length > 0; else status_filtered">filter_list</span>
             <ng-template #status_filtered>more_vert</ng-template>
           </i>
         </button>
@@ -91,108 +101,116 @@
           *ngIf="element.status">{{ element.status.toLowerCase() }}</span>
         <span *ngIf="!element.status">N/A</span>
       </td>
-      <td mat-footer-cell *matFooterCellDef></td>
+      <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
     </ng-container>
 
     <ng-container matColumnDef="shape">
-      <th mat-header-cell *matHeaderCellDef class="th_shape">
-        <span class="label"> {{ DICTIONARY.instance_size}} </span>
+      <th mat-header-cell *matHeaderCellDef class="th_shape label-header">
+        <div class="label"><span class="text"> Instance size</span></div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span
-              *ngIf="filteredReportData[DICTIONARY.billing.instance_size].length > 0; else shape_filtered">filter_list</span>
+              *ngIf="filteredReportData['shapes'].length > 0; else shape_filtered">filter_list</span>
             <ng-template #shape_filtered>more_vert</ng-template>
           </i>
         </button>
       </th>
       <td mat-cell *matCellDef="let element">
-        <span [outerHTML]="element[DICTIONARY.billing.instance_size] | lineBreak"></span>
+        <ng-container *ngIf="element.shape">
+          <div *ngFor="let shape of shapeSplit(element.shape)">{{shape}}</div>
+        </ng-container>
       </td>
-      <td mat-footer-cell *matFooterCellDef></td>
+      <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
     </ng-container>
 
     <ng-container matColumnDef="service">
-      <th mat-header-cell *matHeaderCellDef class="service">
-        <span class="label"> {{ DICTIONARY.service}} </span>
+      <th mat-header-cell *matHeaderCellDef class="service label-header">
+        <div class="label"><span class="text"> Product</span> </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span
-              *ngIf="filteredReportData[DICTIONARY.billing.instance_size].length > 0; else service_filtered">filter_list</span>
+              *ngIf="filteredReportData['shapes'].length > 0; else service_filtered">filter_list</span>
             <ng-template #service_filtered>more_vert</ng-template>
           </i>
         </button>
       </th>
       <td mat-cell *matCellDef="let element">
-        {{ element[DICTIONARY.billing.service] }}
-        <span *ngIf="element.resource_type">({{ element.resource_type }})</span>
+        {{ element.product }}
       </td>
-      <td mat-footer-cell *matFooterCellDef></td>
+      <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
     </ng-container>
 
     <ng-container matColumnDef="charge" stickyEnd>
-      <th mat-header-cell *matHeaderCellDef class="th_charges">
-        <span class="label"> Service Charges </span>
+      <th mat-header-cell *matHeaderCellDef class="th_charges label-header">
+        <div class="label">
+          <div class="sort">
+            <div class="sort-arrow up" (click)="sortBy('cost', 'down')" [ngClass]="{'active': !!this.active['costdown']}"></div>
+            <div class="sort-arrow down" (click)="sortBy('cost', 'up')" [ngClass]="{'active': !!this.active['costup']}"></div>
+          </div>
+          <span class="text">Service Charges</span>
+        </div>
       </th>
 
       <td mat-cell *matCellDef="let element">
-        {{ element[DICTIONARY.billing.cost] }} {{ element[DICTIONARY.billing.currencyCode] }}
+        {{ element.cost }} {{ element['currency'] }}
       </td>
-      <td mat-footer-cell *matFooterCellDef>
-        <span *ngIf="reportData?.length">{{ fullReport[DICTIONARY.billing.costTotal] }}
-          {{ fullReport[DICTIONARY.billing.currencyCode] }}</span>
+      <td mat-footer-cell *matFooterCellDef class="table-footer total-cost">
+        Total <span *ngIf="reportData?.length"> {{ fullReport['total_cost'] }}
+          {{ fullReport['currency'] }}</span>
       </td>
     </ng-container>
 
     <!-- ----------------FILTER -->
-    <ng-container matColumnDef="name-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef>
+    <ng-container matColumnDef="name-filter">
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <input #nameFilter type="text" placeholder="Filter by environment name" class="form-control filter-field"
-          [value]="filtered?.dlab_id" (input)="filteredReportData.dlab_id = $event.target.value" />
+          [value]="filtered?.dlab_id" (input)="filteredReportData.dlab_id = $event.target['value']" />
       </th>
     </ng-container>
     <ng-container matColumnDef="user-filter">
-      <th mat-header-cell *matHeaderCellDef>
-        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'user'"
-          [items]="filterConfiguration.user" [model]="filteredReportData.user"></multi-select-dropdown>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
+        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'users'"
+          [items]="filterConfiguration.users" [model]="filteredReportData.users"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="project-filter">
-      <th mat-header-cell *matHeaderCellDef>
-        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'project'"
-          [items]="filterConfiguration.project" [model]="filteredReportData.project"></multi-select-dropdown>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
+        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'projects'"
+          [items]="filterConfiguration.projects" [model]="filteredReportData.projects"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="type-filter">
-      <th mat-header-cell *matHeaderCellDef>
-        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'resource_type'"
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
+        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'resource_type'"
           [items]="filterConfiguration.resource_type" [model]="filteredReportData.resource_type">
         </multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="status-filter">
-      <th mat-header-cell *matHeaderCellDef>
-        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'status'"
-          [items]="filterConfiguration.status" [model]="filteredReportData.status"></multi-select-dropdown>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
+        <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)" [type]="'statuses'"
+          [items]="filterConfiguration.statuses" [model]="filteredReportData.statuses"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="shape-filter">
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)"
-          [type]="[DICTIONARY.billing.instance_size]" [items]="filterConfiguration[DICTIONARY.billing.instance_size]"
-          [model]="filteredReportData[DICTIONARY.billing.instance_size]"></multi-select-dropdown>
+          [type]="'shapes'" [items]="filterConfiguration['shapes']"
+          [model]="filteredReportData['shapes']"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="service-filter">
-      <th mat-header-cell *matHeaderCellDef>
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
         <multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)"
-          [type]="[DICTIONARY.billing.service_filter_key]"
-          [items]="filterConfiguration[DICTIONARY.billing.service_filter_key]"
-          [model]="filteredReportData[DICTIONARY.billing.service_filter_key]"></multi-select-dropdown>
+          [type]="'products'"
+          [items]="filterConfiguration['products']"
+          [model]="filteredReportData['products']"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="actions" stickyEnd>
-      <th mat-header-cell *matHeaderCellDef>
-        <div class="actions">
+      <th mat-header-cell *matHeaderCellDef class="filter-row-item">
+        <div class="actions th_charges">
           <button mat-icon-button class="btn reset" (click)="resetFiltering(); isFiltered = !isFiltered">
             <i class="material-icons">close</i>
           </button>
@@ -209,12 +227,11 @@
       </td>
     </ng-container>
 
-
     <tr mat-header-row *matHeaderRowDef="displayedColumns; sticky: true" class="header-row"></tr>
 
     <tr [hidden]="!collapseFilterRow" mat-header-row *matHeaderRowDef="displayedFilterColumns; sticky: true"
       class="filter-row"></tr>
-    <tr mat-row *matRowDef="let row; columns: displayedColumns;" class="DATA"></tr>
+    <tr mat-row *matRowDef="let row; columns: displayedColumns;" class="content-row"></tr>
 
     <tr [hidden]="!reportData?.length" mat-footer-row *matFooterRowDef="displayedColumns; sticky: true"
       class="header-row"></tr>
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
index 2ceff2b..9c4f819 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
@@ -19,6 +19,7 @@
 
 .table-wrapper {
   width: 100%;
+
 }
 
 .reporting {
@@ -35,13 +36,19 @@
     th {
       padding-right: 5px;
       z-index: 2 !important;
+      &.th_charges {
+        z-index: 3 !important;
+      }
     }
 
     td {
       font-size: 13px;
+      padding-left: 15px;
 
       &.info {
         z-index: 1 !important;
+        text-align: center;
+        padding: 40px;
       }
     }
 
@@ -60,38 +67,85 @@
     &.header-row {
       th {
         font-size: 11px;
+        .label {
+          padding-left: 0;
+        }
       }
     }
   }
 
   .th_shape {
     width: 10%;
+    min-width: 150px;
   }
 
   .th_user,
   .env_name,
-  .service {
+  .service,
+  .tags {
     width: 15%;
+    min-width: 150px;
     overflow: hidden;
     word-wrap: break-word;
   }
 
+  .tags {
+    .label {
+      padding-top: 0;
+    }
+  }
+
+  .service {
+    min-width: 175px;
+  }
+
   .env_name {
     width: 16%;
+    min-width: 200px;
+  }
+
+  .th_project {
+    width: 12%;
+    min-width: 150px;
+  }
 
   .th_type {
-    width: 12%;
+    width: 10%;
+    min-width: 150px;
   }
 
   .th_status {
     width: 8%;
+    min-width: 150px;
   }
 
   .th_charges {
-    width: 8%;
-    padding-right: 15px;
+    width: 10%;
+    min-width: 155px;
     text-align: right;
+
+    .label {
+      padding-top: 0;
+    }
+  }
+
+  .tags-col {
+    padding: 5px;
+
+    mat-chip {
+      min-height: 20px;
+      padding: 5px 10px;
+      font-size: 13px;
+      max-width: 110px !important;
+      text-overflow: ellipsis;
+      white-space: nowrap;
+      display: inline-block;
+      line-height: 10px;
+      margin: 2px;
+    }
   }
 
   .mat-column-charge {
@@ -99,19 +153,71 @@
   }
 
   .header-row {
+    position: unset;
+
+    .th_charges {
+      padding-top: 0;
+
+      .label {
+        padding-top: 12px;
+      }
+    }
+
     .label {
       display: inline-block;
-      padding-top: 10px;
+      padding-top: 13px;
       vertical-align: super !important;
-      padding-left: 5px;
+
+      .text {
+        padding-left: 15px;
+      }
+    }
+
+    .sort {
+      position: absolute;
+      bottom: 20px;
+
+      &-arrow {
+        width: 6px;
+        height: 6px;
+        border: 3px solid transparent;
+        border-bottom: 3px solid rgba(0, 0, 0, .54);
+        border-left: 3px solid rgba(0, 0, 0, .54);
+        cursor: pointer;
+
+        &.active {
+          border-bottom: 3px solid #35afd5;
+          border-left: 3px solid #35afd5;
+        }
+      }
+
+      .down {
+        transform: rotate(-45deg);
+      }
+
+      .up {
+        transform: rotate(135deg);
+      }
     }
   }
 
   .filter-row {
     .actions {
       text-align: right;
     }
   }
+
+  .table-footer {
+    position: sticky;
+    bottom: 0;
+    background: inherit;
+    border-top: 1px solid #E0E0E0;
+    transform: translateY(-1px);
+    border-bottom: none;
+    padding-left: 0 !important;
+  }
 }
 
 .dashboard_table_body {
@@ -128,13 +234,26 @@
   }
 }
 
+.table-footer {
+  position: sticky;
+  bottom: 0;
+  background: inherit;
+  border-top: 1px solid #E0E0E0;
+  transform: translateY(-1px);
+  border-bottom: none;
+
+  &.total-cost {
+    min-width: 140px;
+    padding-left: 0 !important;
+  }
+}
+
 @media screen and (max-width: 1280px) {
   .dashboard_table.reporting {
-
     .env_name,
     .service,
     .th_type,
-    .th_rstatus {
+    .th_status {
       width: 10%;
     }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts
index d303723..d4e0076 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts
@@ -17,9 +17,8 @@
  * under the License.
  */
 
-import { Component, OnInit, Output, EventEmitter, ViewChild } from '@angular/core';
-
-import { DICTIONARY, ReportingConfigModel } from '../../../dictionary/global.dictionary';
+import { Component, OnInit, Output, EventEmitter, ViewChild, Input } from '@angular/core';
+import { ReportingConfigModel } from '../../../dictionary/global.dictionary';
 
 @Component({
   selector: 'dlab-reporting-grid',
@@ -29,7 +28,6 @@
 
 })
 export class ReportingGridComponent implements OnInit {
-  readonly DICTIONARY = DICTIONARY;
 
   filterConfiguration: ReportingConfigModel;
   filteredReportData: ReportingConfigModel = new ReportingConfigModel([], [], [], [], [], '', '', '', []);
@@ -37,6 +35,7 @@
   reportData: Array<any> = [];
   fullReport: Array<any>;
   isFiltered: boolean = false;
+  active: object = {};
 
   @ViewChild('nameFilter', { static: false }) filter;
 
@@ -44,8 +43,9 @@
   @Output() resetRangePicker: EventEmitter<boolean> = new EventEmitter();
   displayedColumns: string[] = ['name', 'user', 'project', 'type', 'status', 'shape', 'service', 'charge'];
   displayedFilterColumns: string[] = ['name-filter', 'user-filter', 'project-filter', 'type-filter', 'status-filter', 'shape-filter', 'service-filter', 'actions'];
+  filtered: any;
 
-  ngOnInit() { }
+  ngOnInit() {}
 
   onUpdate($event): void {
     this.filteredReportData[$event.type] = $event.model;
@@ -63,6 +63,33 @@
     }
   }
 
+  sortBy(sortItem, direction) {
+    let report: Array<object>;
+    if (direction === 'down') {
+      report = this.reportData.sort((a, b) => {
+        const left = a[sortItem] === null ? '' : a[sortItem];
+        const right = b[sortItem] === null ? '' : b[sortItem];
+        return (left > right) ? 1 : -1;
+      });
+    }
+    if (direction === 'up') {
+      report = this.reportData.sort((a, b) => {
+        const left = a[sortItem] === null ? '' : a[sortItem];
+        const right = b[sortItem] === null ? '' : b[sortItem];
+        return (left < right) ? 1 : -1;
+      });
+    }
+    this.refreshData(this.fullReport, report);
+    this.removeSorting();
+    this.active[sortItem + direction] = true;
+  }
+
+  removeSorting() {
+    for (const item in this.active) {
+      this.active[item] = false;
+    }
+  }
+
   toggleFilterRow(): void {
     this.collapseFilterRow = !this.collapseFilterRow;
   }
@@ -74,13 +101,18 @@
   filter_btnClick(): void {
     this.filterReport.emit(this.filteredReportData);
     this.isFiltered = true;
+    this.removeSorting();
   }
 
   resetFiltering(): void {
     this.filteredReportData.defaultConfigurations();
-
-    this.filter.nativeElement.value = ''
+    this.removeSorting();
+    this.filter.nativeElement.value = '';
     this.filterReport.emit(this.filteredReportData);
     this.resetRangePicker.emit(true);
   }
+
+  shapeSplit(shape) {
+    return shape.split(/(?=Slave)/g);
+  }
 }
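
The sortBy() comparator above never returns 0 and maps null cells to empty strings, so nulls group at the front of an ascending sort. A standalone illustration with made-up rows:

    const rows = [{ user: 'bob' }, { user: null }, { user: 'alice' }];
    rows.sort((a, b) => ((a.user || '') > (b.user || '') ? 1 : -1));
    console.log(rows.map(r => r.user));  // [null, 'alice', 'bob']
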
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
index 8ecf158..1692ef1 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
@@ -20,13 +20,13 @@
 
 import { Component, OnInit, OnDestroy, ViewChild } from '@angular/core';
 import { ToastrService } from 'ngx-toastr';
-
-import { BillingReportService, HealthStatusService } from '../core/services';
+import { ApplicationSecurityService, BillingReportService, HealthStatusService } from '../core/services';
 import { ReportingGridComponent } from './reporting-grid/reporting-grid.component';
 import { ToolbarComponent } from './toolbar/toolbar.component';
 
 import { FileUtils } from '../core/util';
 import { DICTIONARY, ReportingConfigModel } from '../../dictionary/global.dictionary';
+import { ProgressBarService } from '../core/services/progress-bar.service';
 
 @Component({
   selector: 'dlab-reporting',
@@ -44,8 +44,8 @@
   styles: [`
     footer {
       position: fixed;
-      left: 0px;
-      bottom: 0px;
+      left: 0;
+      bottom: 0;
       width: 100%;
       background: #a1b7d1;
       color: #ffffff;
@@ -71,11 +71,14 @@
   constructor(
     private billingReportService: BillingReportService,
     private healthStatusService: HealthStatusService,
-    public toastr: ToastrService
+    public toastr: ToastrService,
+    private progressBarService: ProgressBarService,
+    private applicationSecurityService: ApplicationSecurityService,
   ) { }
 
   ngOnInit() {
-    this.rebuildBillingReport();
+    this.getEnvironmentHealthStatus();
+    this.buildBillingReport();
   }
 
   ngOnDestroy() {
@@ -83,18 +86,18 @@
   }
 
   getGeneralBillingData() {
-
+    setTimeout(() => { this.progressBarService.startProgressBar(); }, 0);
     this.billingReportService.getGeneralBillingData(this.reportData)
       .subscribe(data => {
         this.data = data;
-        this.reportingGrid.refreshData(this.data, this.data.lines);
-        this.reportingGrid.setFullReport(this.data.full_report);
+        this.reportingGrid.refreshData(this.data, this.data.report_lines);
+        this.reportingGrid.setFullReport(this.data.is_full);
 
         this.reportingToolbar.reportData = this.data;
         if (!localStorage.getItem('report_period')) {
           localStorage.setItem('report_period', JSON.stringify({
-            start_date: this.data[DICTIONARY.billing.dateFrom],
-            end_date: this.data[DICTIONARY.billing.dateTo]
+            start_date: this.data['from'],
+            end_date: this.data['to']
           }));
           this.reportingToolbar.setDateRange();
         }
@@ -105,29 +108,41 @@
         } else {
           this.getDefaultFilterConfiguration(this.data);
         }
-      });
+        this.progressBarService.stopProgressBar();
+      }, () => this.progressBarService.stopProgressBar());
   }
 
-  rebuildBillingReport($event?): void {
+  rebuildBillingReport(): void {
+    this.checkAuthorize();
+    this.buildBillingReport();
+  }
+
+  buildBillingReport() {
     this.clearStorage();
     this.resetRangePicker();
     this.reportData.defaultConfigurations();
-
-    this.getEnvironmentHealthStatus();
     this.getGeneralBillingData();
   }
 
+  private checkAuthorize() {
+    this.applicationSecurityService.isLoggedIn()
+      .subscribe(() => {
+        this.getEnvironmentHealthStatus();
+      });
+  }
+
   exportBillingReport(): void {
     this.billingReportService.downloadReport(this.reportData)
       .subscribe(
         data => FileUtils.downloadFile(data),
-        error => this.toastr.error('Billing report export failed!', 'Oops!'));
+        () => this.toastr.error('Billing report export failed!', 'Oops!'));
   }
 
   getDefaultFilterConfiguration(data): void {
     const users = [], types = [], shapes = [], services = [], statuses = [], projects = [];
 
-    data.lines.forEach((item: any) => {
+    data.report_lines.forEach((item: any) => {
       if (item.user && users.indexOf(item.user) === -1)
         users.push(item.user);
 
@@ -137,30 +152,29 @@
       if (item.project && projects.indexOf(item.project) === -1)
         projects.push(item.project);
 
-      if (item[DICTIONARY.billing.resourceType] && types.indexOf(item[DICTIONARY.billing.resourceType]) === -1)
-        types.push(item[DICTIONARY.billing.resourceType]);
+      if (item['resource_type'] && types.indexOf(item['resource_type']) === -1)
+        types.push(item['resource_type']);
 
-      if (item[DICTIONARY.billing.instance_size]) {
-        if (item[DICTIONARY.billing.instance_size].indexOf('Master') > -1) {
-          for (let shape of item[DICTIONARY.billing.instance_size].split('\n')) {
+      if (item.shape) {
+        if (item.shape.indexOf('Master') > -1) {
+          for (let shape of item.shape.split(/(?=Slave)/g)) {
             shape = shape.replace('Master: ', '');
-            shape = shape.replace(/Slave:\s+\d+ x /, '');
+            shape = shape.replace(/Slave: /, '');
             shape = shape.replace(/\s+/g, '');
-
             shapes.indexOf(shape) === -1 && shapes.push(shape);
           }
-        } else if (item[DICTIONARY.billing.instance_size].match(/\d x \S+/)) {
-          const parsedShape = item[DICTIONARY.billing.instance_size].match(/\d x \S+/)[0].split(' x ')[1];
+        } else if (item.shape.match(/\d x \S+/)) {
+          const parsedShape = item.shape.match(/\d x \S+/)[0].split(' x ')[1];
           if (shapes.indexOf(parsedShape) === -1) {
             shapes.push(parsedShape);
           }
         } else {
-          shapes.indexOf(item[DICTIONARY.billing.instance_size]) === -1 && shapes.push(item[DICTIONARY.billing.instance_size]);
+          shapes.indexOf(item.shape) === -1 && shapes.push(item.shape);
         }
       }
 
-      if (item[DICTIONARY.billing.service] && services.indexOf(item[DICTIONARY.billing.service]) === -1)
-        services.push(item[DICTIONARY.billing.service]);
+      if (item.product && services.indexOf(item.product) === -1)
+        services.push(item.product);
     });
 
     if (!this.reportingGrid.filterConfiguration || !localStorage.getItem('report_config')) {
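
Both shapeSplit() in the grid and the filter builder above split cluster shapes on /(?=Slave)/g, a zero-width lookahead, so the 'Slave' token is kept at the start of each fragment. A made-up example:

    const shape = 'Master: n1-standard-2 Slave: 2 x n1-standard-2';
    console.log(shape.split(/(?=Slave)/g));
    // ['Master: n1-standard-2 ', 'Slave: 2 x n1-standard-2']
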
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.html b/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.html
index e425338..583371e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.html
@@ -16,11 +16,12 @@
   ~ specific language governing permissions and limitations
   ~ under the License.
   -->
-<section>
+<section class="toolbar">
   <div class="info_color" *ngIf="reportData">
     <div class="general">
-      <div><span>Service base name: </span><strong>{{ reportData.service_base_name }}</strong></div>
-      <div *ngIf="reportData.tag_resource_id"><span>Resource tag ID: </span><strong>{{ reportData.tag_resource_id }}</strong></div>
+      <div><span>Service base name: </span><strong>{{ reportData.sbn }}</strong></div>
+      <div *ngIf="reportData.tag_resource_id"><span>Resource tag ID:
+        </span><strong>{{ reportData.tag_resource_id }}</strong></div>
       <div class="report-period info_color" *ngIf="availablePeriodFrom && availablePeriodTo">
         <span>Available reporting period from:</span>
         <strong>{{ availablePeriodFrom | date }} </strong>
@@ -33,12 +34,11 @@
     <ng-daterangepicker [(ngModel)]="value" [options]="options" (ngModelChange)="onChange($event)"></ng-daterangepicker>
   </div>
   <div class="action-butt">
-    <button mat-raised-button class="butt" (click)="export($event)" [disabled]="!reportData?.lines.length">
+    <button mat-raised-button class="butt" (click)="export($event)" [disabled]="!reportData?.report_lines.length">
       <i class="material-icons">file_download</i>Export
     </button>
     <button mat-raised-button class="butt" (click)="rebuild($event)">
       <i class="material-icons">autorenew</i>Refresh
     </button>
-
   </div>
 </section>
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.css b/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.scss
similarity index 66%
rename from services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.css
rename to services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.scss
index 5e82739..4a150bf 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.css
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.scss
@@ -17,108 +17,124 @@
  * under the License.
  */
 
-section {
-    display: flex;
-    justify-content: space-between;
-    font-weight: 300;
-}
-section > div {
+section.toolbar {
+  display: flex;
+  justify-content: space-between;
+  font-weight: 300;
+  width: 100%;
+
+  >div {
     width: 33%;
-}
-section > div:nth-child(2) {
+  }
+
+  >div:nth-child(2) {
     text-align: center;
-}
-section > div:nth-child(3) {
+  }
+
+  >div:nth-child(3) {
     text-align: right;
-}
+  }
 
-.action-butt {
+  .action-butt {
     align-self: center;
-}
-.action-butt button:first-child{
-    margin-right: 10px;
-}
+  }
 
-.action-butt .butt-report-range {
+  button {
+    &:first-child {
+      margin-right: 10px;
+    }
+  }
+
+  .butt-report-range {
     width: 240px;
     margin-right: 10px;
-}
+  }
 
-.general {
+  .general {
     font-size: 13px;
-}
-.general div span {
-    width: 190px;
-    display: inline-block;
+
+    div {
+      span {
+        width: 190px;
+        display: inline-block;
+      }
+    }
+  }
 }
 
- /* daterangepicker themes */
+/* daterangepicker themes */
 #range-picker {
-    margin-top: 5px;
+  margin-top: 5px;
 }
+
 #range-picker path#Shape {
-    fill: #36afd5;
+  fill: #36afd5;
 }
 
 #range-picker .ng-daterangepicker,
 #range-picker .ng-daterangepicker.is-active,
 #range-picker .ng-daterangepicker .calendar {
-    border: none;
-    border-radius: 0;
-    box-shadow: 0 3px 1px -2px rgba(0,0,0,.2), 0 2px 2px 0 rgba(0,0,0,.14), 0 1px 5px 0 rgba(0,0,0,.12);
+  border: none;
+  border-radius: 0;
+  box-shadow: 0 3px 1px -2px rgba(0, 0, 0, .2), 0 2px 2px 0 rgba(0, 0, 0, .14), 0 1px 5px 0 rgba(0, 0, 0, .12);
 }
 
 #range-picker .ng-daterangepicker .calendar::after {
-    border-top: 1px solid rgba(234, 234, 234, 0.64);
-    border-left: 1px solid rgba(234, 234, 234, 0.64);
+  border-top: 1px solid rgba(234, 234, 234, 0.64);
+  border-left: 1px solid rgba(234, 234, 234, 0.64);
 }
 
 #range-picker .ng-daterangepicker .calendar .side-container .side-button {
-    background: #fff;
-    color: #718ba6;
-    border: none;
-    border-radius: 0px;
-    box-shadow: 0 3px 1px -2px rgba(0,0,0,.2), 0 2px 2px 0 rgba(0,0,0,.14), 0 1px 5px 0 rgba(0,0,0,.12)
+  background: #fff;
+  color: #718ba6;
+  border: none;
+  border-radius: 0px;
+  box-shadow: 0 3px 1px -2px rgba(0, 0, 0, .2), 0 2px 2px 0 rgba(0, 0, 0, .14), 0 1px 5px 0 rgba(0, 0, 0, .12)
 }
+
 #range-picker .ng-daterangepicker .calendar .side-container .side-button.is-active,
 #range-picker .ng-daterangepicker .input-section .label-txt {
-    color: #35afd5;
+  color: #35afd5;
 }
+
 #range-picker .ng-daterangepicker .calendar .calendar-container .day-num.is-active,
 #range-picker .ng-daterangepicker .calendar .calendar-container .days .day-num:hover {
-    background: #35afd5;
-    background-clip: padding-box;
+  background: #35afd5;
+  background-clip: padding-box;
 
 }
+
 #range-picker .ng-daterangepicker .calendar .calendar-container .day-names,
 #range-picker .ng-daterangepicker .calendar .calendar-container .days {
-    width: 310px;
+  width: 310px;
 }
+
 #range-picker .ng-daterangepicker .calendar .day.is-within-range.is-first-weekday,
 #range-picker .ng-daterangepicker .calendar .day.is-within-range.is-last-weekday {
-    background-clip: padding-box;
+  background-clip: padding-box;
 }
 
 #range-picker .ng-daterangepicker .calendar .calendar-container .day.is-within-range {
-    background: #e9f8fc
+  background: #e9f8fc
 }
 
 #range-picker .ng-daterangepicker .input-section .cal-icon svg path {
-    fill: #35afd5;
+  fill: #35afd5;
 }
 
 #range-picker .ng-daterangepicker .input-section .value-txt {
-    color: #718ba6;
+  color: #718ba6;
 }
 
 #range-picker .ng-daterangepicker .input-section .value-txt.untouched,
 #range-picker .ng-daterangepicker .input-section .label-txt.untouched {
-    color: #fff;
+  color: #fff;
 }
+
 #range-picker .ng-daterangepicker .input-section .value-txt.untouched::after {
-    content: 'Select date';
-    position: absolute;
-    top: 22px;
-    left: 34px;
-    color: #718ba6;
+  content: 'Select date';
+  position: absolute;
+  top: 22px;
+  left: 34px;
+  color: #718ba6;
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.ts b/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.ts
index af51d0a..5272e37 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/toolbar/toolbar.component.ts
@@ -20,11 +20,15 @@
 import { Component, OnInit, AfterViewInit, Output, EventEmitter, ViewEncapsulation, ViewChild } from '@angular/core';
 import { NgDateRangePickerOptions } from 'ng-daterangepicker';
 import { DICTIONARY } from '../../../dictionary/global.dictionary';
+import { skip } from 'rxjs/operators';
+import { Subscription } from 'rxjs';
+import { HealthStatusService } from '../../core/services';
+import { GeneralEnvironmentStatus } from '../../administration/management/management.model';
 
 @Component({
   selector: 'dlab-toolbar',
   templateUrl: './toolbar.component.html',
-  styleUrls: ['./toolbar.component.css'],
+  styleUrls: ['./toolbar.component.scss'],
   encapsulation: ViewEncapsulation.None
 })
 export class ToolbarComponent implements OnInit, AfterViewInit {
@@ -33,8 +37,10 @@
   reportData: any;
   availablePeriodFrom: string;
   availablePeriodTo: string;
+  subscriptions: Subscription = new Subscription();
+  healthStatus: GeneralEnvironmentStatus;
 
-  rangeOptions = {'YTD': 'Year To Date', 'QTD': 'Quarter To Date', 'MTD': 'Month To Date', 'reset': 'All Period Report'};
+  rangeOptions = { 'YTD': 'Year To Date', 'QTD': 'Quarter To Date', 'MTD': 'Month To Date', 'reset': 'All Period Report' };
   options: NgDateRangePickerOptions;
   rangeLabels: any;
 
@@ -42,7 +48,7 @@
   @Output() exportReport: EventEmitter<{}> = new EventEmitter();
   @Output() setRangeOption: EventEmitter<{}> = new EventEmitter();
 
-  constructor() {
+  constructor(private healthStatusService: HealthStatusService) {
     this.options = {
       theme: 'default',
       range: 'tm',
@@ -55,11 +61,13 @@
   }
 
   ngOnInit() {
-     if (localStorage.getItem('report_period')) {
-        const availableRange = JSON.parse(localStorage.getItem('report_period'));
-        this.availablePeriodFrom = availableRange.start_date;
-        this.availablePeriodTo = availableRange.end_date;
-     }
+    if (localStorage.getItem('report_period')) {
+      const availableRange = JSON.parse(localStorage.getItem('report_period'));
+      this.availablePeriodFrom = availableRange.start_date;
+      this.availablePeriodTo = availableRange.end_date;
+    }
+    this.subscriptions.add(this.healthStatusService.statusData.pipe(skip(1)).subscribe(result => {
+      this.healthStatus = result;
+    }));
   }
 
   ngAfterViewInit() {
@@ -86,11 +94,13 @@
     for (let label = 0; label < rangeLabels.length; ++label)
       if (rangeLabels[label].classList.contains('untouched')) {
         rangeLabels[label].classList.remove('untouched');
-    }
+      }
 
     const reportDateRange = dateRange.split('-');
-    this.setRangeOption.emit({start_date: reportDateRange[0].split('/').join('-'),
-      end_date: reportDateRange[1].split('/').join('-')});
+    this.setRangeOption.emit({
+      start_date: reportDateRange[0].split('/').join('-'),
+      end_date: reportDateRange[1].split('/').join('-')
+    });
   }
 
   rebuild($event): void {
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.html
index fc24a94..04ea086 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.html
@@ -35,7 +35,7 @@
               {{resource.status}}
             </span>
           </td>
-          <td>{{ resource[DICTIONARY[resource.image].master_node_shape] }}</td>
+          <td>{{ resource[DICTIONARY[PROVIDER][resource.image].master_node_shape] }}</td>
         </tr>
       </table>
       <div class="detail-info content-box" *ngIf="!resource.error_message">
@@ -49,7 +49,7 @@
           <div class="col">
             <p>Cluster version:</p>
           </div>
-          <div class="col"><span>{{ resource[DICTIONARY.cluster_version] }}</span></div>
+          <div class="col"><span>{{ resource[DICTIONARY[PROVIDER].cluster_version] }}</span></div>
         </div>
         <div class="row-wrap">
           <div class="col">
@@ -63,34 +63,34 @@
         </div>
         <div class="row-wrap">
           <div class="col"
-            *ngIf="DICTIONARY.cloud_provider === 'gcp' && resource.image === 'docker.dlab-dataengine-service'; else inst">
-            <p>{{ DICTIONARY[resource.image].master_instance_number }}:</p>
+            *ngIf="DICTIONARY[PROVIDER].cloud_provider === 'gcp' && resource.image === 'docker.dlab-dataengine-service'; else inst">
+            <p>Master instance number:</p>
           </div>
           <ng-template #inst>
             <div class="col">
-              <p>{{ DICTIONARY[resource.image].instance_number }}:</p>
+              <p>Total instance number:</p>
             </div>
           </ng-template>
-          <div class="col"><span>{{ resource[DICTIONARY[resource.image].total_instance_number] }}</span></div>
+          <div class="col"><span>{{ resource[DICTIONARY[PROVIDER][resource.image].total_instance_number] }}</span></div>
         </div>
         <div class="row-wrap"
-          *ngIf="DICTIONARY.cloud_provider === 'gcp' && resource.image === 'docker.dlab-dataengine-service'">
+          *ngIf="DICTIONARY[PROVIDER].cloud_provider === 'gcp' && resource.image === 'docker.dlab-dataengine-service'">
           <div class="col">
-            <p>{{ DICTIONARY[resource.image].slave_instance_number }}:</p>
+            <p>Slave instance number:</p>
           </div>
-          <div class="col"><span>{{ resource[DICTIONARY[resource.image].total_slave_instance_number] }}</span></div>
+          <div class="col"><span>{{ resource[DICTIONARY[PROVIDER][resource.image].total_slave_instance_number] }}</span></div>
         </div>
         <div class="row-wrap">
           <div class="col">
-            <p>{{ DICTIONARY[resource.image].data_engine_master_instance_size }}:</p>
+            <p>Master instance size:</p>
           </div>
-          <div class="col"><span>{{ resource[DICTIONARY[resource.image].master_node_shape] }}</span></div>
+          <div class="col"><span>{{ resource[DICTIONARY[PROVIDER][resource.image].master_node_shape] }}</span></div>
         </div>
         <div class="row-wrap" *ngIf="resource.image === 'docker.dlab-dataengine-service'">
           <div class="col">
-            <p>{{ DICTIONARY[resource.image].data_engine_slave_instance_size }}:</p>
+            <p>Slave instance size:</p>
           </div>
-          <div class="col"><span>{{ resource[DICTIONARY[resource.image].slave_node_shape] }}</span></div>
+          <div class="col"><span>{{ resource[DICTIONARY[PROVIDER][resource.image].slave_node_shape] }}</span></div>
         </div>
 
         <div *ngIf="resource.status === 'running'">
@@ -98,16 +98,19 @@
             <p class="time_info">Up time {{upTimeInHours}} hour(s) since {{upTimeSince || "not specified."}}</p>
           </div>
           <div class="m-top-10">
-            <p *ngFor="let item of resource.computational_url" class="ellipsis">
-              <strong>{{ item.description }}:</strong>&nbsp;
+            <p *ngFor="let item of resource.computational_url" class="ellipsis flex">
+              <span class="strong">{{ item.description }}:</span>&nbsp;
               <a href="{{item.url}}" target="_blank" matTooltip="{{item.url}}"
                 matTooltipPosition="above">{{ item.url }}</a>
             </p>
           </div>
         </div>
 
-        <div class="checkbox-group"
-          *ngIf="resource.image === 'docker.dlab-dataengine' && resource.status === 'running' && environment.image !== 'docker.dlab-zeppelin'">
+        <div class="checkbox-group" *ngIf="resource.image === 'docker.dlab-dataengine'
+          && resource.status === 'running'
+          && environment.image !== 'docker.dlab-zeppelin'
+          && environment.image !== 'docker.dlab-superset'
+          && environment.image !== 'docker.dlab-jupyterlab'">
           <label>
             <input #configurationNode type="checkbox" (change)="selectConfiguration()" /> Cluster configurations
           </label>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
index cc0211b..16fb1ae 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
@@ -35,6 +35,7 @@
 
 export class DetailComputationalResourcesComponent implements OnInit {
   readonly DICTIONARY = DICTIONARY;
+  readonly PROVIDER = this.data.environment.cloud_provider;
 
   resource: any;
   environment: any;
@@ -55,7 +56,7 @@
   ) { }
 
   ngOnInit() {
-    this.open(this.data.environment, this.data.resource)
+    this.open(this.data.environment, this.data.resource);
   }
 
   public open(environment, resource): void {
@@ -63,6 +64,7 @@
     this.resource = resource;
     this.environment = environment;
 
+
     this.upTimeInHours = (this.resource.up_time) ? DateUtils.diffBetweenDatesInHours(this.resource.up_time) : 0;
     this.upTimeSince = (this.resource.up_time) ? new Date(this.resource.up_time).toString() : '';
     this.initFormModel();
@@ -87,14 +89,16 @@
 
   public getClusterConfiguration(): void {
     this.dataengineConfigurationService
-      .getClusterConfiguration(this.environment.name, this.resource.computational_name)
+      .getClusterConfiguration(this.environment.project, this.environment.name, this.resource.computational_name, this.PROVIDER)
       .subscribe((result: any) => this.config = result,
         error => this.toastr.error(error.message || 'Configuration loading failed!', 'Oops!'));
   }
 
   public editClusterConfiguration(data): void {
     this.dataengineConfigurationService
-      .editClusterConfiguration(data.configuration_parameters, this.environment.name, this.resource.computational_name)
+      .editClusterConfiguration(
+        data.configuration_parameters, this.environment.project, this.environment.name, this.resource.computational_name, this.PROVIDER
+      )
       .subscribe(result => {
         this.dialogRef.close();
       },
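
The template hunks above move every dictionary lookup behind a per-provider key (DICTIONARY[PROVIDER][...] instead of DICTIONARY[...]), so one UI build can render environments from different clouds side by side. A sketch of the structure this implies; the keys are read off the bindings above, while the values are illustrative and not copied from the real global.dictionary:

    // Illustrative shape only; the real DICTIONARY lives in
    // dictionary/global.dictionary and carries many more keys.
    const DICTIONARY: Record<string, any> = {
      aws:   { cloud_provider: 'aws',   cluster_version: 'emr_version' },
      gcp:   { cloud_provider: 'gcp',   cluster_version: 'dataproc_version' },
      azure: { cloud_provider: 'azure', cluster_version: 'hdinsight_version' },
    };

    // The component pins the provider once from the opened environment...
    const PROVIDER = 'gcp'; // e.g. this.data.environment.cloud_provider

    // ...and every later lookup is scoped to it:
    const versionField = DICTIONARY[PROVIDER].cluster_version; // 'dataproc_version'
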
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
index cf41f0c..ec3c3ac 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
@@ -33,8 +33,8 @@
               <label class="label">Select cluster type</label>
               <div class="control selector-wrapper">
                 <mat-form-field>
-                  <mat-label>Select cluster type</mat-label>
-                  <mat-select formControlName="template_name" disableOptionCentering>
+<!--                  <mat-label>Select cluster type</mat-label>-->
+                  <mat-select formControlName="template_name" disableOptionCentering placeholder="Select cluster type">
                     <mat-option *ngFor="let type of clusterTypes" [value]="type.template_name"
                       (click)="selectImage(type)">{{ type.template_name }}</mat-option>
                     <mat-option *ngIf="!clusterTypes.length" class="multiple-select ml-10" disabled>Clusters types list
@@ -69,17 +69,18 @@
               <label class="label">Cluster alias</label>
               <div class="control">
                 <input
-                  [class.danger_field]="computationalResourceExist || !resourceForm?.controls['cluster_alias_name'].valid
+                  [class.danger_field]="!resourceForm?.controls['cluster_alias_name'].valid
                         && resourceForm?.controls['cluster_alias_name'].dirty && resourceForm?.controls['cluster_alias_name'].hasError('duplication')"
                   type="text" class="form-control" placeholder="Enter cluster alias"
                   formControlName="cluster_alias_name" />
-                <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('duplication')">This
-                  cluster name already exists.</span>
+                <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('user-duplication')">You have cluster with this name in current project.</span>
+                <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('other-user-duplication')">Other user has cluster with this name in current project.</span>
                 <span class="error" *ngIf="!resourceForm?.controls.cluster_alias_name.valid
                                             && resourceForm?.controls['cluster_alias_name'].dirty
-                                            && !resourceForm?.controls['cluster_alias_name'].hasError('duplication')">
-                  Cluster name <span *ngIf="DICTIONARY.cloud_provider !== 'aws'">cannot be longer than 10 characters
-                    and</span> can only contain letters, numbers, hyphens and '_' but can not end with special
+                                            && !resourceForm?.controls['cluster_alias_name'].hasError('user-duplication')
+                                            && !resourceForm?.controls['cluster_alias_name'].hasError('other-user-duplication')">
+                  Cluster name cannot be longer than {{DICTIONARY[PROVIDER].max_cluster_name_length}} characters
+                  and can only contain letters, numbers, hyphens and '_' but can not end with special
                   characters
                 </span>
               </div>
@@ -88,7 +89,7 @@
 
           <div class="col">
             <div class="control-group" *ngIf="selectedImage?.image">
-              <label class="label">{{ DICTIONARY[selectedImage.image].instance_number }}</label>
+              <label class="label">Total instance number</label>
               <div class="control">
                 <input type="number" class="form-control" min="{{minInstanceNumber}}" max="{{maxInstanceNumber}}"
                   formControlName="instance_number" (keypress)="CheckUtils.isNumberKey($event)" />
@@ -100,10 +101,10 @@
             </div>
 
             <div class="control-group" *ngIf="selectedImage?.image">
-              <label class="label">{{ DICTIONARY[selectedImage.image].data_engine_master_instance_size }}</label>
+              <label class="label">Master instance size</label>
               <div class="control selector-wrapper">
                 <mat-form-field>
-                  <mat-label>Select {{ DICTIONARY.notebook_instance_size }}</mat-label>
+                  <mat-label>Select instance size</mat-label>
                   <mat-select formControlName="shape_master" disableOptionCentering>
                     <mat-optgroup *ngFor="let item of (selectedImage.computation_resources_shapes | keys)"
                       [label]="item.key | underscoreless">
@@ -121,10 +122,10 @@
 
             <div class="control-group" *ngIf="selectedImage?.image"
               [hidden]="selectedImage?.image === 'docker.dlab-dataengine'">
-              <label class="label">{{ DICTIONARY[selectedImage.image].data_engine_slave_instance_size }}</label>
+              <label class="label">Slave instance size</label>
               <div class="control selector-wrapper">
                 <mat-form-field>
-                  <mat-label>Select {{ DICTIONARY.notebook_instance_size }}</mat-label>
+                  <mat-label>Select instance size</mat-label>
                   <mat-select formControlName="shape_slave" disableOptionCentering>
                     <mat-optgroup *ngFor="let item of (selectedImage.computation_resources_shapes | keys)"
                       [label]="item.key | underscoreless">
@@ -194,7 +195,7 @@
           </label>
           <div class="config-link" *ngIf="(configuration?.nativeElement['checked'] || false)
             && selectedImage?.image === 'docker.dlab-dataengine-service'
-            && DICTIONARY.cloud_provider === 'aws'">
+            && PROVIDER === 'aws'">
             To view example JSON of configurations refer for <a
               href="https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html" target="_blank">AWS
               official documentation</a>
@@ -219,7 +220,7 @@
         </div>
         <div class="text-center m-top-30">
           <button mat-raised-button type="button" (click)="dialogRef.close()" class="butt action">Cancel</button>
-          <button mat-raised-button type="button" [disabled]="!resourceForm?.valid || 
+          <button mat-raised-button type="button" [disabled]="!resourceForm?.valid ||
               (selectedImage?.image === 'docker.dlab-dataengine-service' && !resourceForm.value.shape_slave) ||
               (selectedImage?.image === 'docker.dlab-dataengine-service' && !resourceForm.value.version)"
             (click)="createComputationalResource(resourceForm.value)" class="butt butt-success action"
@@ -231,8 +232,8 @@
 
     </div>
     <ng-template #placeholder>
-      <div *ngIf="!loading" class="info message">Computational resource creations are not available.<br>Please, check
-        your permissions.</div>
+      <div *ngIf="!loading && !clusterTypes?.length" class="info message">
+        Computational resource creations are not available.<br>Please, check your permissions.</div>
       <div *ngIf="loading" class="info message">
         Computational resource data is processing
         <mat-progress-bar mode="indeterminate"></mat-progress-bar>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
index bcb5d61..0c867f9 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
@@ -18,7 +18,7 @@
  */
 
 import { Component, OnInit, ViewChild, Inject, ChangeDetectorRef } from '@angular/core';
-import { FormGroup, FormBuilder, Validators } from '@angular/forms';
+import {FormGroup, FormBuilder, Validators, FormControl} from '@angular/forms';
 import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog';
 import { ToastrService } from 'ngx-toastr';
 
@@ -36,7 +36,7 @@
 })
 
 export class ComputationalResourceCreateDialogComponent implements OnInit {
-  readonly PROVIDER = DICTIONARY.cloud_provider;
+  readonly PROVIDER = this.data.notebook.cloud_provider;
   readonly DICTIONARY = DICTIONARY;
   readonly CLUSTER_CONFIGURATION = CLUSTER_CONFIGURATION;
   readonly CheckUtils = CheckUtils;
@@ -44,6 +44,8 @@
   notebook_instance: any;
   resourcesList: any;
   clusterTypes = [];
+  userComputations = [];
+  projectComputations = [];
   selectedImage: any;
   spotInstance: boolean = true;
 
@@ -76,8 +78,7 @@
     this.notebook_instance = this.data.notebook;
     this.resourcesList = this.data.full_list;
     this.initFormModel();
-
-    this.getTemplates(this.notebook_instance.project, this.notebook_instance.endpoint);
+    this.getTemplates(this.notebook_instance.project, this.notebook_instance.endpoint, this.notebook_instance.cloud_provider);
   }
 
   public selectImage($event) {
@@ -85,7 +86,7 @@
     this.getComputationalResourceLimits();
 
     if ($event.templates && $event.templates.length)
-      this.resourceForm.controls['version'].setValue($event.templates[0].version)
+      this.resourceForm.controls['version'].setValue($event.templates[0].version);
   }
 
   public selectSpotInstances($event?): void {
@@ -123,14 +124,14 @@
   }
 
   public isAvailableSpots(): boolean {
-    if (DICTIONARY.cloud_provider === 'aws' && this.selectedImage.image === 'docker.dlab-dataengine-service')
+    if (this.PROVIDER === 'aws' && this.selectedImage.image === 'docker.dlab-dataengine-service')
       return !!Object.keys(this.filterAvailableSpots()).length;
 
     return false;
   }
 
   public createComputationalResource(data) {
-    this.model.createComputationalResource(data, this.selectedImage, this.notebook_instance, this.spotInstance)
+    this.model.createComputationalResource(data, this.selectedImage, this.notebook_instance, this.spotInstance, this.PROVIDER.toLowerCase())
       .subscribe((response: any) => {
         if (response.status === HTTP_STATUS_CODES.OK) this.dialogRef.close();
       }, error => this.toastr.error(error.message, 'Oops!'));
@@ -142,10 +143,12 @@
       version: [''],
       shape_master: ['', Validators.required],
       shape_slave: [''],
-      cluster_alias_name: ['', [Validators.required, Validators.pattern(PATTERNS.namePattern),
-      this.providerMaxLength, this.checkDuplication.bind(this)]],
+      cluster_alias_name: ['', [Validators.required, Validators.pattern(PATTERNS.namePattern), Validators.maxLength(DICTIONARY[this.PROVIDER].max_cluster_name_length),
+      this.checkDuplication.bind(this)]],
       instance_number: ['', [Validators.required, Validators.pattern(PATTERNS.nodeCountPattern), this.validInstanceNumberRange.bind(this)]],
-      preemptible_instance_number: [0, Validators.compose([Validators.pattern(PATTERNS.integerRegex), this.validPreemptibleRange.bind(this)])],
+      preemptible_instance_number: [0,
+        Validators.compose([Validators.pattern(PATTERNS.integerRegex),
+        this.validPreemptibleRange.bind(this)])],
       instance_price: [0, [this.validInstanceSpotRange.bind(this)]],
       configuration_parameters: ['', [this.validConfiguration.bind(this)]],
       custom_tag: [this.notebook_instance.tags.custom_tag]
@@ -158,17 +161,17 @@
 
   private getComputationalResourceLimits(): void {
     if (this.selectedImage && this.selectedImage.image) {
-      const activeImage = DICTIONARY[this.selectedImage.image];
+      const activeImage = DICTIONARY[this.PROVIDER][this.selectedImage.image];
 
       this.minInstanceNumber = this.selectedImage.limits[activeImage.total_instance_number_min];
       this.maxInstanceNumber = this.selectedImage.limits[activeImage.total_instance_number_max];
 
-      if (DICTIONARY.cloud_provider === 'gcp' && this.selectedImage.image === 'docker.dlab-dataengine-service') {
+      if (this.PROVIDER === 'gcp' && this.selectedImage.image === 'docker.dlab-dataengine-service') {
         this.maxInstanceNumber = this.selectedImage.limits[activeImage.total_instance_number_max] - 1;
         this.minPreemptibleInstanceNumber = this.selectedImage.limits.min_dataproc_preemptible_instance_count;
       }
 
-      if (DICTIONARY.cloud_provider === 'aws' && this.selectedImage.image === 'docker.dlab-dataengine-service') {
+      if (this.PROVIDER === 'aws' && this.selectedImage.image === 'docker.dlab-dataengine-service') {
         this.minSpotPrice = this.selectedImage.limits.min_emr_spot_instance_bid_pct;
         this.maxSpotPrice = this.selectedImage.limits.max_emr_spot_instance_bid_pct;
 
@@ -184,7 +187,7 @@
   //  Validation
   private validInstanceNumberRange(control) {
     if (control && control.value)
-      if (DICTIONARY.cloud_provider === 'gcp' && this.selectedImage.image === 'docker.dlab-dataengine-service') {
+      if (this.PROVIDER === 'gcp' && this.selectedImage.image === 'docker.dlab-dataengine-service') {
         this.validPreemptibleNumberRange();
         return control.value >= this.minInstanceNumber && control.value <= this.maxInstanceNumber ? null : { valid: false };
       } else {
@@ -228,23 +231,26 @@
   }
 
   private checkDuplication(control) {
-    if (this.containsComputationalResource(control.value))
-      return { duplication: true };
+    if (this.containsComputationalResource(control.value, this.userComputations)) {
+      return { 'user-duplication': true };
+    }
+
+    if (this.containsComputationalResource(control.value, this.projectComputations)) {
+      return { 'other-user-duplication': true };
+    }
   }
 
-  private providerMaxLength(control) {
-    if (DICTIONARY.cloud_provider !== 'aws')
-      return control.value.length <= DICTIONARY.max_cluster_name_length ? null : { valid: false };
-  }
-
-  private getTemplates(project, endpoint) {
-    this.userResourceService.getComputationalTemplates(project, endpoint).subscribe(
+  private getTemplates(project, endpoint, provider) {
+    this.userResourceService.getComputationalTemplates(project, endpoint, provider).subscribe(
       clusterTypes => {
-        this.clusterTypes = clusterTypes;
-        this.selectedImage = clusterTypes[0];
+        this.clusterTypes = clusterTypes.templates;
+        this.userComputations = clusterTypes.user_computations;
+        this.projectComputations = clusterTypes.project_computations;
+
+        this.clusterTypes.forEach((cluster, index) => this.clusterTypes[index].computation_resources_shapes = SortUtils.shapesSort(cluster.computation_resources_shapes));
+        this.selectedImage = clusterTypes.templates[0];
 
         if (this.selectedImage) {
-          this.loading = false;
           this._ref.detectChanges();
 
           this.filterShapes();
@@ -252,6 +258,7 @@
           this.getComputationalResourceLimits();
         }
 
+        this.loading = false;
       }, () => this.loading = false);
   }
 
@@ -267,7 +274,7 @@
           return obj;
         }, {});
 
-      if (DICTIONARY.cloud_provider !== 'azure') {
+      if (this.PROVIDER !== 'azure') {
         const images = this.clusterTypes.filter(image => image.image === 'docker.dlab-dataengine');
         this.clusterTypes = images;
         this.selectedImage = this.clusterTypes[0];
@@ -287,10 +294,10 @@
     return filtered;
   }
 
-  private containsComputationalResource(conputational_resource_name: string): boolean {
+  private containsComputationalResource(conputational_resource_name: string, existNames: Array<string>): boolean {
     if (conputational_resource_name) {
-      return this.notebook_instance.resources.some(resource =>
-        CheckUtils.delimitersFiltering(conputational_resource_name) === CheckUtils.delimitersFiltering(resource.computational_name));
+      return existNames.some(resource =>
+        CheckUtils.delimitersFiltering(conputational_resource_name) === CheckUtils.delimitersFiltering(resource));
     }
   }
 }
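
The new validator distinguishes a clash with the current user's own clusters from a clash with another user's cluster in the same project, returning distinct error keys that the template maps to different messages. A standalone sketch of that check; the normalization mirrors what CheckUtils.delimitersFiltering appears to do (strip '-' and '_', lower-case), since that utility is not shown in this diff:

    // Assumed normalization, mirroring CheckUtils.delimitersFiltering.
    const normalize = (name: string) => name.replace(/[-_]/g, '').toLowerCase();

    function checkDuplication(
      value: string,
      userComputations: string[],
      projectComputations: string[],
    ): { [key: string]: boolean } | null {
      const contains = (names: string[]) =>
        names.some(existing => normalize(existing) === normalize(value));

      if (contains(userComputations)) return { 'user-duplication': true };
      if (contains(projectComputations)) return { 'other-user-duplication': true };
      return null; // an explicit null keeps the control valid
    }

    console.log(checkDuplication('my-spark', ['my_spark'], [])); // { 'user-duplication': true }
    console.log(checkDuplication('fresh', ['a'], ['b']));        // null

The component version above falls through without a return statement when there is no clash; Angular treats that implicit undefined the same as null.
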
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create.model.ts
index b5a1a32..ce80c52 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create.model.ts
@@ -22,17 +22,16 @@
 import { Observable } from 'rxjs';
 
 import { UserResourceService } from '../../../core/services';
-import { DICTIONARY } from '../../../../dictionary/global.dictionary';
 
 @Injectable()
 export class ComputationalResourceModel {
 
   constructor(private userResourceService: UserResourceService) { }
 
-  public createComputationalResource(parameters, image, env, spot): Observable<{}> {
+  public createComputationalResource(parameters, image, env, spot, provider): Observable<{}> {
     const config = parameters.configuration_parameters ? JSON.parse(parameters.configuration_parameters) : null;
 
-    if (DICTIONARY.cloud_provider === 'aws' && image.image === 'docker.dlab-dataengine-service') {
+    if (provider === 'aws' && image.image === 'docker.dlab-dataengine-service') {
       return this.userResourceService.createComputationalResource_DataengineService({
         name: parameters.cluster_alias_name,
         emr_instance_count: parameters.instance_number,
@@ -47,8 +46,8 @@
         config: config,
         project: env.project,
         custom_tag: parameters.custom_tag
-      });
-    } else if (DICTIONARY.cloud_provider === 'gcp' && image.image === 'docker.dlab-dataengine-service') {
+      }, provider);
+    } else if (provider === 'gcp' && image.image === 'docker.dlab-dataengine-service') {
       return this.userResourceService.createComputationalResource_DataengineService({
         template_name: image.template_name,
         image: image.image,
@@ -63,7 +62,7 @@
         config: config,
         project: env.project,
         custom_tag: parameters.custom_tag
-      });
+      }, provider);
     } else {
       return this.userResourceService.createComputationalResource_Dataengine({
         name: parameters.cluster_alias_name,
@@ -75,7 +74,7 @@
         config: config,
         project: env.project,
         custom_tag: parameters.custom_tag
-      });
+      }, provider);
     }
   }
 }
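
createComputationalResource now takes the provider as an argument instead of reading a global dictionary, and forwards it to the REST layer. The dispatch reduces to a three-way branch; a condensed sketch with the service stubbed out and the payload trimmed to a few shared fields:

    type Provider = 'aws' | 'gcp' | 'azure';

    interface ClusterRequest { name: string; image: string; project: string; }

    // Stubs standing in for UserResourceService; the real methods issue HTTP calls.
    const api = {
      dataengineService: (body: ClusterRequest, provider: Provider) =>
        console.log(`create managed cluster on ${provider}`, body),
      dataengine: (body: ClusterRequest, provider: Provider) =>
        console.log(`create standalone Spark cluster on ${provider}`, body),
    };

    function create(body: ClusterRequest, provider: Provider) {
      // EMR (aws) and Dataproc (gcp) use the managed-cluster endpoint with
      // provider-specific payloads; everything else is a plain dataengine.
      if (body.image === 'docker.dlab-dataengine-service'
          && (provider === 'aws' || provider === 'gcp')) {
        return api.dataengineService(body, provider);
      }
      return api.dataengine(body, provider);
    }

    create({ name: 'es-1', image: 'docker.dlab-dataengine-service', project: 'p1' }, 'aws');
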
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource.model.ts
new file mode 100644
index 0000000..681f4fd
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource.model.ts
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/* tslint:disable:no-empty */
+
+
+import {Injectable} from '@angular/core';
+
+@Injectable()
+
+export class ComputationModel {
+  constructor(
+    public project: string,
+    public exploratory: string,
+    public resource: string,
+    public status: string,
+  ) { }
+
+  public static computationRes(data) {
+    if (data) {
+      return data.map(value => value.exploratory.map(el => el.resources.map( resource => new ComputationModel(
+        value.project,
+        el.name,
+        resource.computational_name,
+        resource.status,
+      ))));
+    }
+  }
+}
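
Note that computationRes maps three nested arrays without flattening, so callers receive ComputationModel[][][]. When a flat list is more convenient, the nesting collapses with flatMap; a sketch over the same input shape the method expects:

    interface ProjectData {
      project: string;
      exploratory: {
        name: string;
        resources: { computational_name: string; status: string }[];
      }[];
    }

    function flatComputationRes(data: ProjectData[]) {
      // One entry per computational resource instead of array-of-array-of-array.
      return data.flatMap(value =>
        value.exploratory.flatMap(el =>
          el.resources.map(resource => ({
            project: value.project,
            exploratory: el.name,
            resource: resource.computational_name,
            status: resource.status,
          }))));
    }
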
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
index a64244f..a4b825e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
@@ -27,6 +27,7 @@
      display: flex;
      justify-content: space-between;
      height: 26px;
+     align-items: center;
 
      .resource-name {
        width: 55%;
@@ -66,6 +67,9 @@
 
        .schedule {
          cursor: pointer;
+         display: flex;
+         align-items: center;
+         font-size: 18px;
 
          &.not-allowed {
            pointer-events: none;
@@ -77,7 +81,8 @@
        }
 
        .start-stop-action {
-
+         display: flex;
+         align-items: center;
          font-size: 18px;
          color: #36afd5;
 
@@ -93,6 +98,8 @@
        .remove_butt {
          cursor: pointer;
          color: #ef5c4b;
+         display: flex;
+         align-items: center;
 
          &.disabled {
            color: #f5d3d3;
@@ -102,3 +109,23 @@
      }
    }
  }
+ @media screen and (max-width: 1520px) {
+   .resources,
+   managment {
+     .source {
+       .resource-wrap {
+         .resource-name {
+           width: 45%;
+         }
+
+         .resource-status {
+           width: 40%;
+         }
+
+         .resource-actions {
+           width: 15%;
+         }
+       }
+     }
+   }
+ }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
index 1dbbfa3..7bc5126 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
@@ -54,13 +54,13 @@
       dialogRef.afterClosed().subscribe(result => {
         if (result && action === 'stop') {
           this.userResourceService
-            .toggleStopStartAction(this.environment.project, this.environment.name, resource.computational_name, action)
+            .toggleStopStartAction(this.environment.project, this.environment.name, resource.computational_name, action, this.environment.cloud_provider)
             .subscribe(() => {
               this.rebuildGrid();
             });
         } else if (result && action === 'terminate') {
           this.userResourceService
-            .suspendComputationalResource(this.environment.name, resource.computational_name)
+            .suspendComputationalResource(this.environment.project, this.environment.name, resource.computational_name, this.environment.cloud_provider)
             .subscribe(() => {
               this.rebuildGrid();
             });
@@ -68,7 +68,7 @@
       });
     } else if (action === 'start') {
       this.userResourceService
-        .toggleStopStartAction(this.environment.project, this.environment.name, resource.computational_name, 'start')
+        .toggleStopStartAction(this.environment.project, this.environment.name, resource.computational_name, 'start', this.environment.cloud_provider)
         .subscribe(
           () => this.rebuildGrid(),
           error => this.toastr.error(error.message || 'Computational resource starting failed!', 'Oops!'));
@@ -76,12 +76,14 @@
   }
 
   public detailComputationalResources(environment, resource): void {
-    this.dialog.open(DetailComputationalResourcesComponent, { data: { environment, resource }, panelClass: 'modal-sm' })
+    this.dialog.open(DetailComputationalResourcesComponent, { data:
+        { environment, resource }, panelClass: 'modal-sm' })
       .afterClosed().subscribe(() => this.rebuildGrid());
-  };
+  }
 
   public openScheduleDialog(resource) {
-    this.dialog.open(SchedulerComponent, { data: { notebook: this.environment, type: 'СOMPUTATIONAL', resource }, panelClass: 'modal-xl-s' })
+    this.dialog.open(SchedulerComponent, { data:
+        { notebook: this.environment, type: 'СOMPUTATIONAL', resource }, panelClass: 'modal-xl-s' })
       .afterClosed().subscribe(() => this.rebuildGrid());
   }
 }
@@ -95,11 +97,18 @@
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </div>
   <div mat-dialog-content class="content">
-    <p>Computational resource <strong> {{ data.resource.computational_name }}</strong> will be 
+    <div class="dialog-max-width">
+      Computational resource
+      <span class="strong" matTooltip="{{ data.resource.computational_name }}"
+            [matTooltipShowDelay]="2000"
+            matTooltipPosition="left"
+      >
+        {{ data.resource.computational_name }}
+      </span> will be
       <span *ngIf="data.action === 'terminate'"> decommissioned.</span>
       <span *ngIf="data.action === 'stop'">stopped.</span>
-    </p>
-    <p class="m-top-20"><strong>Do you want to proceed?</strong></p>
+    </div>
+    <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
   </div>
   <div class="text-center">
     <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
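
The list component now threads project and cloud_provider through every lifecycle call, matching the provider-scoped endpoints introduced elsewhere in this change. A sketch of the calling convention; the URL layout shown is an assumption for illustration and does not appear in this diff:

    // Hypothetical stand-in for UserResourceService; the URL shape is illustrative.
    class ComputationApi {
      toggleStopStartAction(project: string, notebook: string, cluster: string,
                            action: 'start' | 'stop', provider: string): string {
        return `PUT /api/${provider}/${project}/${notebook}/${cluster}/${action}`;
      }
      suspendComputationalResource(project: string, notebook: string,
                                   cluster: string, provider: string): string {
        return `DELETE /api/${provider}/${project}/${notebook}/${cluster}`;
      }
    }

    const api = new ComputationApi();
    console.log(api.toggleStopStartAction('proj1', 'jupyter-nb', 'spark-1', 'stop', 'aws'));
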
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
index cc817fe..4a7a4a6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
@@ -19,7 +19,7 @@
 
 <div class="ami-dialog" id="dialog-box">
   <header class="dialog-header">
-    <h4 class="modal-title">Create {{ DICTIONARY.image }}</h4>
+    <h4 class="modal-title">Create image</h4>
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </header>
   <div class="dialog-content">
@@ -28,12 +28,12 @@
         <div class="control-group">
           <label class="label">Name</label>
           <div class="control">
-            <input type="text" formControlName="name" placeholder="Enter {{ DICTIONARY.image }} name">
-            <span class="danger_color" *ngIf="createAMIForm.controls['name'].hasError('duplication')">This {{ DICTIONARY.image }} name already exists.</span>
-            <span class="danger_color" *ngIf="!createAMIForm.valid && createAMIForm.controls['name'].dirty && !createAMIForm.controls['name'].hasError('duplication')">
-              {{ DICTIONARY.image }} name
-              <span *ngIf="DICTIONARY.cloud_provider === 'azure'"> cannot be longer than 10 characters and</span> 
-              can only contain letters, numbers, hyphens and '_'</span>
+            <input type="text" formControlName="name" placeholder="Enter image name">
+            <span class="error" *ngIf="createAMIForm.controls['name'].hasError('duplication')">This
+              image name already exists in project.</span>
+            <span class="error"
+              *ngIf="!createAMIForm.valid && createAMIForm.controls['name'].dirty && !createAMIForm.controls['name'].hasError('duplication')">
+              Name cannot be longer than 10 characters and can only contain letters, numbers, hyphens and '_' but can not end with special characters</span>
           </div>
         </div>
         <div class="control-group">
@@ -45,7 +45,8 @@
       </form>
       <div class="text-center m-top-30 m-bott-10">
         <button mat-raised-button type="button" class="butt action" (click)="dialogRef.close()">Cancel</button>
-        <button mat-raised-button type="button" [disabled]="!createAMIForm.valid" (click)="assignChanges(createAMIForm.value)" class="butt butt-success action">Create</button>
+        <button mat-raised-button type="button" [disabled]="!createAMIForm.valid"
+          (click)="assignChanges(createAMIForm.value)" class="butt butt-success action">Create</button>
       </div>
     </div>
   </div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
index bfa4f3a..e783951 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
@@ -23,7 +23,7 @@
 import { ToastrService } from 'ngx-toastr';
 
 import { UserResourceService } from '../../../core/services';
-import { HTTP_STATUS_CODES } from '../../../core/util';
+import {HTTP_STATUS_CODES, PATTERNS} from '../../../core/util';
 import { DICTIONARY } from '../../../../dictionary/global.dictionary';
 
 @Component({
@@ -35,8 +35,7 @@
   readonly DICTIONARY = DICTIONARY;
   public notebook: any;
   public createAMIForm: FormGroup;
-
-  namePattern = '[-_a-zA-Z0-9]+';
+  public provider: string;
   delimitersRegex = /[-_]?/g;
   imagesList: any;
 
@@ -49,42 +48,33 @@
   ) { }
 
   ngOnInit() {
-    this._userResource.getImagesList().subscribe(res => this.imagesList = res);
-    this.open(this.data);
-  }
-
-  public open(notebook): void {
-    this.notebook = notebook;
+    this.notebook = this.data;
+    this.provider = this.data.cloud_provider;
 
     this.initFormModel();
-    this._userResource.getImagesList().subscribe(res => this.imagesList = res);
+    this._userResource.getImagesList(this.data.project).subscribe(res => this.imagesList = res);
   }
 
   public assignChanges(data) {
     this._userResource.createAMI(data).subscribe(
-      response => {
-        if (response.status === HTTP_STATUS_CODES.ACCEPTED) this.dialogRef.close();
-      },
-      error => this.toastr.error(error.message || `${DICTIONARY.image.toLocaleUpperCase()} creation failed!`, 'Oops!'));
+      response => response.status === HTTP_STATUS_CODES.ACCEPTED && this.dialogRef.close(),
+      error => this.toastr.error(error.message || `Image creation failed!`, 'Oops!'));
   }
 
   private initFormModel(): void {
     this.createAMIForm = this._fb.group({
-      name: ['', [Validators.required, Validators.pattern(this.namePattern), this.providerMaxLength, this.checkDuplication.bind(this)]],
+      name: ['', [Validators.required, Validators.pattern(PATTERNS.namePattern), this.providerMaxLength, this.checkDuplication.bind(this)]],
       description: [''],
-      exploratory_name: [this.notebook.name]
+      exploratory_name: [this.notebook.name],
+      project_name: [this.notebook.project]
     });
   }
 
   private providerMaxLength(control) {
-    if (DICTIONARY.cloud_provider !== 'aws')
+    if (control && control.value)
       return control.value.length <= 10 ? null : { valid: false };
   }
 
-  private delimitersFiltering(resource): string {
-    return resource.replace(this.delimitersRegex, '').toString().toLowerCase();
-  }
-
   private checkDuplication(control) {
     if (control.value)
       return this.isDuplicate(control.value) ? { duplication: true } : null;
@@ -97,4 +87,8 @@
     }
     return false;
   }
+
+  private delimitersFiltering(resource): string {
+    return resource.replace(this.delimitersRegex, '').toString().toLowerCase();
+  }
 }
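
In the image dialog, providerMaxLength no longer branches on the cloud provider: every image name is capped at 10 characters, and the shared PATTERNS.namePattern replaces the local regex. How the validators compose, assuming PATTERNS.namePattern matches the removed local pattern '[-_a-zA-Z0-9]+':

    import { AbstractControl, FormControl, ValidationErrors, Validators } from '@angular/forms';

    // Assumed to match the removed local namePattern.
    const namePattern = '[-_a-zA-Z0-9]+';

    function providerMaxLength(control: AbstractControl): ValidationErrors | null {
      if (control && control.value)
        return control.value.length <= 10 ? null : { valid: false };
      return null;
    }

    const name = new FormControl('image_name_too_long', [
      Validators.required,
      Validators.pattern(namePattern),
      providerMaxLength,
    ]);
    console.log(name.errors); // { valid: false } – longer than 10 characters
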
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.html
index 700cc54..1331f5f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.html
@@ -34,25 +34,25 @@
       <div class="billing-detail content-box">
         <mat-list>
             <mat-list-item class="list-header">
-              <div class="resource-name ellipsis" [ngClass]="{ 'wide-name-field' : DICTIONARY.cloud_provider === 'azure' }">Name</div>
-              <div class="service">{{ DICTIONARY.service }}</div>
-              <div class="resource-type" *ngIf="DICTIONARY.cloud_provider === 'aws'">{{ DICTIONARY.type }}</div>
-              <div class="cost-currency">Cost</div>
+              <div class="resource-name ellipsis" [ngClass]="{ 'wide-name-field' : provider === 'azure' }">Name</div>
+              <div class="service">Product</div>
+<!--              <div class="resource-type" *ngIf="provider === 'aws'">Type</div>-->
               <div class="usage-date-start">Start</div>
               <div class="usage-date-end">End</div>
+              <div class="cost-currency">Cost</div>
             </mat-list-item>
             <div class="scrolling-content" id="scrolling">
-              <mat-list-item *ngFor="let item of notebook.billing">
-                <div class="resource-name" [ngClass]="{ 'wide-name-field' : DICTIONARY.cloud_provider === 'azure' }"
-                     matTooltip="{{ item[DICTIONARY.billing.resourceName] }}"
+              <mat-list-item *ngFor="let item of notebook.billing.report_lines">
+                <div class="resource-name" [ngClass]="{ 'wide-name-field' : provider === 'azure' }"
+                     matTooltip="{{ item.resource_name }}"
                      matTooltipPosition="above">
-                     {{ item[DICTIONARY.billing.resourceName] }}
+                     {{ item.resource_name }}
                 </div>
-                <div class="service">{{ item[DICTIONARY.billing.service] }}</div>
-                <div class="resource-type" *ngIf="DICTIONARY.cloud_provider === 'aws'">{{ item[DICTIONARY.billing.type] }}</div>
-                <div class="cost-currency">{{ item[DICTIONARY.billing.cost] }} {{ item[DICTIONARY.billing.currencyCode] }}</div>
-                <div class="usage-date-start">{{ item[DICTIONARY.billing.dateFrom] | date }}</div>
-                <div class="usage-date-end">{{ item[DICTIONARY.billing.dateTo] | date }}</div>
+                <div class="service">{{ item.product }}</div>
+<!--                <div class="resource-type" >{{ item.resourse_type }}</div>-->
+                <div class="usage-date-start">{{ item.from | date }}</div>
+                <div class="usage-date-end">{{ item.to | date }}</div>
+                <div class="cost-currency">{{ item.cost }} {{ item.currency }}</div>
               </mat-list-item>
             </div>
         </mat-list>
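
The cost dialog drops the dictionary-mapped billing fields for a fixed report_lines payload, so the template addresses item.product, item.cost and the rest directly. The shape the template now relies on, read off the bindings above (nothing beyond these fields is specified in this diff):

    interface BillingReportLine {
      resource_name: string;
      product: string;
      from: string;      // rendered through Angular's date pipe
      to: string;
      cost: number;
      currency: string;
    }

    interface NotebookBilling {
      report_lines: BillingReportLine[];
    }
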
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.scss
index 64ef43c..18998ea 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.scss
@@ -43,7 +43,7 @@
   .resource-name,
   .usage-date-start,
   .usage-date-end {
-    width: 15%;
+    width: 20%;
     overflow: hidden;
     text-overflow: ellipsis;
     padding-right: 10px;
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.ts
index 3243b77..e115cad 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/cost-details-dialog/cost-details-dialog.component.ts
@@ -29,6 +29,7 @@
 export class CostDetailsDialogComponent implements OnInit {
   readonly DICTIONARY = DICTIONARY;
   public notebook: any;
+  public provider: string;
 
   constructor(
     @Inject(MAT_DIALOG_DATA) public data: any,
@@ -37,5 +38,6 @@
 
   ngOnInit() {
     this.notebook = this.data;
+    this.provider = this.notebook.cloud_provider;
   }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.html
index 7dd58d9..43b3c71 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.html
@@ -29,8 +29,7 @@
           <label class="label">Select project</label>
           <div class="control selector-wrapper">
             <mat-form-field>
-              <mat-label>Select project</mat-label>
-              <mat-select formControlName="project" disableOptionCentering>
+              <mat-select formControlName="project" panelClass="create-resources-dialog" placeholder="Select project">
                 <mat-option *ngFor="let project of projects" [value]="project.name" (click)="setEndpoints(project)">
                   {{ project.name }}</mat-option>
                 <mat-option *ngIf="!projects.length" class="multiple-select ml-10" disabled>Projects list is empty
@@ -47,8 +46,8 @@
           <label class="label">Select endpoint</label>
           <div class="control selector-wrapper" [ngClass]="{ 'not-active' : !endpoints.length }">
             <mat-form-field>
-              <mat-label>Select endpoint</mat-label>
-              <mat-select formControlName="endpoint" disableOptionCentering [disabled]="!endpoints.length">
+              <mat-select formControlName="endpoint" disableOptionCentering [disabled]="!endpoints.length"
+                panelClass="create-resources-dialog" placeholder="Select endpoint">
                 <mat-option *ngFor="let endpoint of endpoints" [value]="endpoint"
                   (click)="getTemplates(createExploratoryForm?.controls['project'].value, endpoint)">{{ endpoint }}
                 </mat-option>
@@ -66,8 +65,8 @@
           <label class="label">Select template</label>
           <div class="control selector-wrapper" [ngClass]="{ 'not-active' : !templates.length }">
             <mat-form-field>
-              <mat-label>Select template</mat-label>
-              <mat-select formControlName="version" disableOptionCentering [disabled]="!templates.length">
+              <mat-select formControlName="version" disableOptionCentering [disabled]="!templates.length"
+                panelClass="create-resources-dialog" placeholder="Select template">
                 <mat-option *ngFor="let template of templates"
                   [value]="template.exploratory_environment_versions[0].version" (click)="getShapes(template)">
                   {{ template.exploratory_environment_versions[0].template_name }}
@@ -83,10 +82,10 @@
         </div>
 
         <div class="control-group" *ngIf="images && images.length > 0">
-          <label class="label">Select {{ DICTIONARY.image }}</label>
+          <label class="label">Select image</label>
           <div class="control selector-wrapper">
             <mat-form-field>
-              <mat-label>Select {{ DICTIONARY.image }}</mat-label>
+              <mat-label>Select image</mat-label>
               <mat-select formControlName="notebook_image_name" disableOptionCentering>
                 <mat-option [value]="null">None</mat-option>
                 <mat-option *ngFor="let image of images" [value]="image.name">{{ image.name }}</mat-option>
@@ -107,23 +106,23 @@
                     && createExploratoryForm?.controls['name'].dirty
                     && createExploratoryForm?.controls['name'].hasError('duplication')" type="text"
               class="form-control" placeholder="Enter Name" formControlName="name">
-            <span class="error" *ngIf="createExploratoryForm?.controls['name'].hasError('duplication')">This name
-              already exists.</span>
+            <span class="error" *ngIf="createExploratoryForm?.controls['name'].hasError('duplication')">This name already exists in current project.</span>
             <span class="error" *ngIf="!createExploratoryForm?.controls.name.valid
                                         && createExploratoryForm?.controls['name'].dirty
                                         && !createExploratoryForm?.controls['name'].hasError('duplication')">Name
-              <span *ngIf="DICTIONARY.cloud_provider !== 'aws'">cannot be longer than 10 characters and</span> can only
+             cannot be longer than 10 characters and can only
               contain letters, numbers, hyphens and '_' but can not end with special characters
             </span>
           </div>
         </div>
 
         <div class="control-group">
-          <label class="label">{{ DICTIONARY.notebook_instance_size }}</label>
+          <label class="label">Instance size</label>
           <div class="control selector-wrapper" [ngClass]="{ 'not-active': !currentTemplate }">
             <mat-form-field>
-              <mat-label>Select {{ DICTIONARY.notebook_instance_size }}</mat-label>
-              <mat-select formControlName="shape" disableOptionCentering [disabled]="!currentTemplate">
+              <mat-label>Select instance size</mat-label>
+              <mat-select formControlName="shape" disableOptionCentering [disabled]="!currentTemplate"
+                panelClass="create-resources-shapes" placeholder="Instance size">
                 <mat-optgroup *ngFor="let item of (shapes | keys)" [label]="item.key | underscoreless">
                   <mat-option *ngFor="let list_item of item.value" [value]="list_item.Type">
                     <strong class="highlight icon-label">{{ list_item.Size }}</strong> {{ list_item.Type }}
@@ -148,7 +147,8 @@
         </div>
 
         <div *ngIf="currentTemplate">
-          <div class="checkbox-group" *ngIf="currentTemplate?.image !== 'docker.dlab-zeppelin'; else not_support">
+          <div class="checkbox-group"
+            *ngIf="currentTemplate?.image !== 'docker.dlab-zeppelin' && currentTemplate?.image !== 'docker.dlab-superset' && currentTemplate?.image !== 'docker.dlab-jupyterlab'">
             <label>
               <input #configurationNode type="checkbox" (change)="selectConfiguration()" /> Spark configurations
             </label>
@@ -160,14 +160,14 @@
                 parameters is not in a valid format</span>
             </div>
           </div>
-          <ng-template #not_support>
-            <small>Spark default configuration for Apache Zeppelin can not be changed from DLab UI. Currently it can be
-              done directly through Apache Zeppelin interpreter menu.
-              For more details please refer for Apache Zeppelin <a
-                href="https://zeppelin.apache.org/docs/0.8.0/usage/interpreter/overview.html" target="_blank">official
-                documentation</a>.
-            </small>
-          </ng-template>
+
+          <small *ngIf="currentTemplate?.image === 'docker.dlab-zeppelin'">
+            Spark default configuration for Apache Zeppelin can not be changed from DLab UI. Currently it can be
+            done directly through Apache Zeppelin interpreter menu.
+            For more details please refer for Apache Zeppelin <a
+              href="https://zeppelin.apache.org/docs/0.8.0/usage/interpreter/overview.html" target="_blank">official
+              documentation</a>.
+          </small>
         </div>
 
         <div class="text-center m-top-30">
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.scss
index 0592248..6711ad4 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.scss
@@ -69,7 +69,7 @@
 }
 
 @-moz-document url-prefix() {
-  form {
+  .create-environment form {
     padding-bottom: 30px;
   }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.ts
index e42ea92..b9fc3f0 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/create-environment/create-environment.component.ts
@@ -27,6 +27,7 @@
 import { CheckUtils, SortUtils, HTTP_STATUS_CODES, PATTERNS } from '../../../core/util';
 import { DICTIONARY } from '../../../../dictionary/global.dictionary';
 import { CLUSTER_CONFIGURATION } from '../../computational/computational-resource-create-dialog/cluster-configuration-templates';
+import {tap} from 'rxjs/operators';
 
 @Component({
   selector: 'create-environment',
@@ -37,6 +38,7 @@
 export class ExploratoryEnvironmentCreateComponent implements OnInit {
   readonly DICTIONARY = DICTIONARY;
   public createExploratoryForm: FormGroup;
+  public projectExploratories: {};
 
   projects: Project[] = [];
   templates = [];
@@ -60,8 +62,14 @@
   }
 
   ngOnInit() {
+    this.getNamesByProject();
     this.getUserProjects();
     this.initFormModel();
+    this.createExploratoryForm.get('project').valueChanges.subscribe(() => {
+      if (this.createExploratoryForm.controls.name.value) {
+        this.createExploratoryForm.get('name').updateValueAndValidity();
+      }
+    });
   }
 
   public getProjects() {
@@ -73,20 +81,31 @@
       this.projects = projects;
       const activeProject = projects.find(item => item.name === this.resourceGrid.activeProject);
       if (this.resourceGrid.activeProject && activeProject) {
-        this.setEndpoints(activeProject)
+        this.setEndpoints(activeProject);
         this.createExploratoryForm.controls['project'].setValue(activeProject.name);
       }
     });
   }
 
   public setEndpoints(project) {
+    if (this.images) this.images = [];
+
     this.endpoints = project.endpoints
       .filter(e => e.status === 'RUNNING')
       .map(e => e.name);
   }
 
   public getTemplates(project, endpoint) {
-    this.userResourceService.getExploratoryTemplates(project, endpoint).subscribe(templates => this.templates = templates);
+    this.userResourceService.getExploratoryTemplates(project, endpoint)
+      .pipe(tap(results => {
+        results.sort((a, b) =>
+          (a.exploratory_environment_versions[0].template_name > b.exploratory_environment_versions[0].template_name) ?
+            1 : -1);
+      }))
+      .subscribe(templates => {
+        this.templates = templates;
+      }
+      );
   }
 
   public getShapes(template) {
@@ -101,12 +120,17 @@
       template_name: this.currentTemplate.exploratory_environment_versions[0].template_name
     };
 
-    data.cluster_config = data.cluster_config ? JSON.parse(data.cluster_config) : null
+    data.cluster_config = data.cluster_config ? JSON.parse(data.cluster_config) : null;
     this.userResourceService.createExploratoryEnvironment({ ...parameters, ...data }).subscribe((response: any) => {
       if (response.status === HTTP_STATUS_CODES.OK) this.dialogRef.close();
     }, error => this.toastr.error(error.message || 'Exploratory creation failed!', 'Oops!'));
   }
 
+  public getNamesByProject() {
+    this.userResourceService.getProjectByExploratoryEnvironment().subscribe(response => {
+      this.projectExploratories = response;
+    });
+  }
 
   public selectConfiguration() {
     const value = (this.configuration.nativeElement.checked && this.createExploratoryForm)
@@ -137,12 +161,14 @@
   }
 
   private checkDuplication(control) {
-    if (this.resourceGrid.containsNotebook(control.value))
+    if (this.createExploratoryForm
+      && this.createExploratoryForm.controls.project.value
+      && this.resourceGrid.containsNotebook(control.value, this.projectExploratories[this.createExploratoryForm.controls.project.value]))
       return { duplication: true };
   }
 
   private providerMaxLength(control) {
-    if (DICTIONARY.cloud_provider !== 'aws')
+    if (control && control.value)
       return control.value.length <= 10 ? null : { valid: false };
   }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.html
index 403c8a8..d41f69a 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.html
@@ -47,34 +47,43 @@
             <p>Open following URL(s) in your browser to access this box:</p>
             <div class="links_block">
               <p *ngFor="let item of notebook.url">
-                <strong class="description">{{item.description}}: &#32;</strong>
-                <a class="ellipsis" href="{{item.url}}" target="_blank">&#32;{{item.url}}</a>
+               <ng-container *ngIf="!(item.description === 'Ungit' && notebook.template_name.indexOf('Apache Zeppelin') !== -1)">
+                <span class="description">{{item.description}}: &nbsp;</span>
+                <a class="ellipsis" matTooltip="{{item.url}}" matTooltipPosition="above" href="{{item.url}}"
+                  target="_blank">
+                  &nbsp;{{item.url}}
+                </a>
+               </ng-container>
               </p>
             </div>
-            <p *ngIf="notebook.username">Node User: &#32;<strong>{{ notebook.username }}</strong></p>
-            <p *ngIf="notebook.password">Password: &#32;<strong>{{ notebook.password }}</strong></p>
+            <p class="flex" *ngIf="notebook.username">Node User: &nbsp;<span
+                class="strong">{{ notebook.username }}</span></p>
+            <p class="flex" *ngIf="notebook.password">Password: &nbsp;<span
+                class="strong">{{ notebook.password }}</span></p>
 
-            <p class="m-top-30">{{ DICTIONARY.personal_storage }}: &#32;</p>
+            <p class="m-top-30">{{ DICTIONARY[PROVIDER].personal_storage }}: &nbsp;</p>
             <div class="links_block">
-              <p *ngIf="DICTIONARY.cloud_provider === 'azure' && notebook.account_name">{{ DICTIONARY.account }}
-                <strong>{{ notebook.account_name}}</strong></p>
-              <p *ngIf="notebook.bucket_name">{{ DICTIONARY.container }} <strong>{{ notebook.bucket_name }}</strong></p>
+              <p *ngIf="DICTIONARY[PROVIDER].cloud_provider === 'azure' && notebook.account_name">{{ DICTIONARY[PROVIDER].account }}
+                <span class="bucket-info">{{ notebook.account_name}}</span></p>
+              <p *ngIf="notebook.bucket_name">{{ DICTIONARY[PROVIDER].container }} <span
+                  class="bucket-info">{{ notebook.bucket_name }}</span></p>
             </div>
-            <p>{{ DICTIONARY.collaboration_storage }}: &#32;</p>
+            <p>Shared endpoint bucket: &nbsp;</p>
             <div class="links_block">
-              <p *ngIf="DICTIONARY.cloud_provider === 'azure' && notebook.shared_account_name">{{ DICTIONARY.account }}
-                <strong>{{ notebook.shared_account_name}}</strong></p>
-              <p *ngIf="notebook.shared_bucket_name">{{ DICTIONARY.container }}
-                <strong>{{ notebook.shared_bucket_name }}</strong></p>
+              <p *ngIf="DICTIONARY[PROVIDER].cloud_provider === 'azure' && notebook.shared_account_name">{{ DICTIONARY[PROVIDER].account }}
+                <span class="bucket-info">{{ notebook.shared_account_name}}</span></p>
+              <p *ngIf="notebook.shared_bucket_name">{{ DICTIONARY[PROVIDER].container }}
+                <span class="bucket-info">{{ notebook.shared_bucket_name }}</span></p>
             </div>
             <br />
 
-            <div *ngIf="DICTIONARY.cloud_provider === 'azure' && notebook.datalake_name">
-              <p>Data Lake Store: &#32;</p>
+            <div *ngIf="DICTIONARY[PROVIDER].cloud_provider === 'azure' && notebook.datalake_name">
+              <p>Data Lake Store: &nbsp;</p>
               <div class="links_block">
-                <p>Data Lake Store Account: &#32;<strong>{{ notebook.datalake_name }}</strong></p>
-                <p>Personal folder: &#32;<strong>{{ notebook.datalake_directory }}</strong></p>
-                <p>Collaboration folder: &#32;<strong>{{ notebook.datalake_shared_directory }}</strong></p>
+                <p>Data Lake Store Account: &nbsp;<span class="bucket-info">{{ notebook.datalake_name }}</span></p>
+                <p>Personal folder: &nbsp;<span class="bucket-info">{{ notebook.datalake_directory }}</span></p>
+                <p>Collaboration folder: &nbsp;<span class="bucket-info">{{ notebook.datalake_shared_directory }}</span>
+                </p>
               </div>
             </div>
 
@@ -88,7 +97,7 @@
           </div>
 
           <div class="checkbox-group" *ngIf="notebook.image !== 'docker.dlab-zeppelin'; else not_support"
-            [hidden]="notebook.status !== 'running'">
+            [hidden]="notebook.status !== 'running' || notebook.image === 'docker.dlab-superset' || notebook.image === 'docker.dlab-jupyterlab'">
             <label>
               <input #configurationNode type="checkbox" (change)="selectConfiguration()" /> Cluster configurations
             </label>
@@ -114,7 +123,7 @@
           </ng-template>
           <div [scrollTo]="configuration?.nativeElement['checked'] || false" class="text-center m-top-20 m-bott-10"
             *ngIf="(configuration?.nativeElement['checked'] || false) && notebook.status === 'running'">
-            <button mat-raised-button type="button" (click)="bindDialog.close()" class="butt action">Cancel</button>
+            <button mat-raised-button type="button" (click)="dialogRef.close()" class="butt action">Cancel</button>
             <button mat-raised-button type="submit" [disabled]="!configurationForm.valid"
               class="butt butt-success action" [ngClass]="{'not-allowed': !configurationForm.valid}"
               (click)="editClusterConfiguration(configurationForm.value)">Update</button>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.scss
index d28993f..2849ce0 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.scss
@@ -17,6 +17,8 @@
  * under the License.
  */
 
+@import '_variables.scss';
+
 .scroll-box {
   max-height: 65vh;
   overflow-y: auto;
@@ -24,11 +26,15 @@
 }
 
 .links_block {
-  > p {
+
+  >p {
     display: flex;
+
     .description {
       white-space: nowrap;
-      padding-right: 5px;
+      padding-left: 7px;
+      font-weight: 600;
+      color: $blue-grey-color;
     }
   }
 }
@@ -36,12 +42,15 @@
 .checkbox-group {
   position: relative;
   padding: 5px;
+
   label {
     input[type="checkbox"] {
       margin: 10px 0;
     }
   }
+
   span {
+
     &.danger_color {
       position: absolute;
       bottom: -20px;
@@ -49,10 +58,12 @@
     }
   }
 }
+
 .config-details {
   text-align: left;
   position: relative;
   height: 280px;
+
   textarea {
     height: 100%;
     resize: none;
@@ -60,7 +71,9 @@
     line-height: 1.5;
     font-family: Consolas, monospace;
   }
+
   span {
+
     .danger_color {
       position: absolute;
       bottom: -16px;
@@ -68,3 +81,9 @@
     }
   }
 }
+
+.bucket-info {
+  padding-left: 7px;
+  font-weight: 600;
+  color: $blue-grey-color;
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
index fa05b52..db097f3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
@@ -35,6 +35,7 @@
 
 export class DetailDialogComponent implements OnInit {
   readonly DICTIONARY = DICTIONARY;
+  readonly PROVIDER = this.data.cloud_provider;
   notebook: any;
   upTimeInHours: number;
   upTimeSince: string = '';
@@ -75,7 +76,7 @@
 
   public getClusterConfiguration(): void {
     this.dataengineConfigurationService
-      .getExploratorySparkConfiguration(this.notebook.name)
+      .getExploratorySparkConfiguration(this.notebook.project, this.notebook.name)
       .subscribe(
         (result: any) => this.config = result,
         error => this.toastr.error(error.message || 'Configuration loading failed!', 'Oops!'));
@@ -94,7 +95,7 @@
 
   public editClusterConfiguration(data): void {
     this.dataengineConfigurationService
-      .editExploratorySparkConfiguration(data.configuration_parameters, this.notebook.name)
+      .editExploratorySparkConfiguration(data.configuration_parameters, this.notebook.project, this.notebook.name)
       .subscribe(result => {
         this.dialogRef.close();
       },
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/filter-libs.model.ts
similarity index 68%
copy from integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java
copy to services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/filter-libs.model.ts
index 4b70836..4d211cc 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/filter-libs.model.ts
@@ -17,13 +17,20 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.exceptions;
+export class FilterLibsModel {
+  constructor(
+    public name: string,
+    public group: Array<any>,
+    public resource: Array<any>,
+    public resourceType: Array<any>,
+    public status: Array<any>,
+  ) { }
 
-public class JenkinsException extends RuntimeException {
-
-	private static final long serialVersionUID = 1L;
-
-	public JenkinsException(String message) {
-		super(message);
-	}
+  resetFilterLibs(): void {
+    this.name = '';
+    this.group = [];
+    this.resource = [];
+    this.resourceType = [];
+    this.status = [];
+  }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html
index b015ab8..968f1a3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html
@@ -19,7 +19,7 @@
 
 <div class="install-libraries" id="dialog-box">
   <header class="dialog-header">
-    <h4 class="modal-title">Manage <b>{{ notebook?.name }}</b> Libraries</h4>
+    <h4 class="modal-title">Manage <span>{{ notebook?.name }}</span> Libraries</h4>
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </header>
   <div class="dialog-content">
@@ -28,7 +28,7 @@
       <p class=" message">Cannot install libraries: Exploratory
         <strong>{{ notebook?.name }}</strong> is not running</p>
     </div>
-    <div class="loading-block" *ngIf="!libs_uploaded && uploading && notebook?.status === 'running'">
+    <div class="loading-block" *ngIf="!libs_uploaded && uploading && data.status === 'running'">
       <div class="uploading">
         <p>Please wait until DLab loads full list of available libraries for you...</p>
         <img src="assets/img/gif-spinner.gif" alt="loading">
@@ -48,12 +48,20 @@
               <label class="label">Select group</label>
               <div class="control">
                 <dropdown-list #groupSelect (selectedItem)="onUpdate($event)"></dropdown-list>
+                <span class="error-message" *ngIf="!group && libSearch.value">Group field is required. Please choose appropriate group.</span>
               </div>
             </div>
           </div>
           <div class="search">
             <mat-form-field class="chip-list">
-              <input type="text" [placeholder]="group === 'java' ? 'Enter library name in <groupId>:<artifactId>:<versionId> format' : 'Enter library name'" matInput [formControl]="libSearch" [value]="query" [matAutocomplete]="auto">
+              <input
+                type="text"
+                [placeholder]="group === 'java' ? 'Enter library name in <groupId>:<artifactId>:<versionId> format' : 'Enter library name'"
+                matInput
+                [formControl]="libSearch"
+                [value]="query"
+                [matAutocomplete]="auto"
+              >
               <mat-icon matSuffix>search</mat-icon>
               <mat-autocomplete #auto="matAutocomplete" class="suggestions">
                 <ng-template ngFor let-item [ngForOf]="filteredList" let-i="index">
@@ -84,7 +92,7 @@
                 </mat-option>
               </mat-autocomplete>
             </mat-form-field>
-            <div class="list-selected list-container" id="scrolling">
+            <div class="list-selected list-container" id='scrolling'>
               <mat-chip-list *ngIf="model.selectedLibs.length && libs_uploaded">
                 <mat-chip *ngFor="let item of model.selectedLibs">
                   {{ item.name }}
@@ -101,26 +109,110 @@
     <div class="libs-info">
       <mat-list>
         <mat-list-item class="list-header">
-          <div class="lib-name">Name</div>
-          <div class="lib-group">Group</div>
-          <div class="lib-destination">Destination</div>
-          <div class="lib-resource-type">Resource type</div>
-          <div class="lib-status">Status</div>
+          <div class="lib-name">Name
+            <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
+            <i class="material-icons">
+              <span>more_vert</span>
+            </i>
+          </button></div>
+          <div class="lib-group">Group
+            <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
+              <i class="material-icons">
+                <span>more_vert</span>
+              </i>
+            </button>
+          </div>
+          <div class="lib-destination"><span class="">Destination</span>
+            <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
+              <i class="material-icons">
+                <span>more_vert</span>
+              </i>
+            </button>
+          </div>
+          <div class="lib-resource-type">Resource type
+            <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
+              <i class="material-icons">
+                <span>more_vert</span>
+              </i>
+            </button>
+          </div>
+          <div class="lib-status"><span>Status</span>
+            <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
+              <i class="material-icons">
+                <span>more_vert</span>
+              </i>
+            </button>
+          </div>
         </mat-list-item>
 
-        <div class="scrollingList" id="scrolling" *ngIf="notebookLibs?.length">
-          <div *ngIf="notebook?.status !== 'running' && notebookFailedLibs.length > 0" class="info message">
-            <p>Cannot retry to reinstall failed libraries: Exploratory {{ notebook?.name }} is not running</p>
-          </div>
+        <ng-container *ngIf="filtered" >
+         <mat-list-item class="lib-col filter-row">
+          <th class="lib-name lib-input">
+            <input placeholder="Filter by library name" [value]="filterModel.name" (input)="filterModel.name = $event.target['value']" type="text" class="form-control filter-field "/>
+          </th>
+          <th class="lib-group lib-col">
+              <multi-select-dropdown
+                (selectionChange)="onFilterUpdate($event)"
+                [items]="this.filterConfiguration.group"
+                [type]="'group'"
+                [model]="this.filterModel.group"
+              >
+              </multi-select-dropdown>
+            </th>
+            <th class="lib-destination lib-col">
+              <multi-select-dropdown
+                (selectionChange)="onFilterUpdate($event)"
+                [items]="this.filterConfiguration.resource"
+                [type]="'resource'"
+                [model]="this.filterModel.resource"
+              >
+              </multi-select-dropdown>
+            </th>
+            <th class="lib-resource-type lib-col">
+              <multi-select-dropdown
+                (selectionChange)="onFilterUpdate($event)"
+                [items]="this.filterConfiguration.resourceType"
+                [type]="'resource type'"
+                [model]="this.filterModel.resourceType"
+              >
+              </multi-select-dropdown>
+          </th>
+          <th class="lib-status lib-col">
+            <multi-select-dropdown
+              (selectionChange)="onFilterUpdate($event)"
+              [items]="this.filterConfiguration.status"
+              [type]="'status'"
+              [model]="this.filterModel.status">
+            </multi-select-dropdown>
+          </th>
+           <ng-container matColumnDef="action-filter" stickyEnd>
+             <th mat-header-cell>
+               <div class="filter-actions">
+                 <button mat-icon-button class="btn reset" (click)="resetFilterConfigurations()">
+                   <i class="material-icons">close</i>
+                 </button>
 
-          <mat-list-item *ngFor="let lib of notebookLibs">
-            <div class="lib-name">
+                 <button mat-icon-button class="btn apply" (click)="filterLibs()">
+                   <i class="material-icons"  [ngClass]="{'not-allowed': filterModel.length === 0}">done</i>
+                 </button>
+               </div>
+             </th>
+           </ng-container>
+         </mat-list-item>
+        </ng-container>
+
+        <div class="scrollingList" id="scrolling" *ngIf="notebookLibs?.length">
+<!--          <div *ngIf="notebook?.status !== 'running' && notebookFailedLibs.length > 0" class="info message">-->
+<!--            <p>Cannot retry to reinstall failed libraries: Exploratory {{ notebook?.name }} is not running</p>-->
+<!--          </div>-->
+          <mat-list-item *ngFor="let lib of filtredNotebookLibs">
+            <div class="lib-name ellipsis">
               <strong>{{ lib.name }}</strong>&nbsp;
-              <span *ngIf="lib.version  && lib.version !== 'N/A'">v.{{ lib.version }}</span>
+              <span *ngIf="lib.version  && lib.version !== 'N/A'">{{ lib.version }}</span>
             </div>
-            <div class="lib-group">{{ groupsListMap[lib.group] }}</div>
+            <div class="lib-group-col">{{ groupsListMap[lib.group] }}</div>
             <div class="st-group">
-              <ng-template let-item ngFor [ngForOf]="lib.status">
+              <ng-template let-item ngFor [ngForOf]="lib.filteredStatus">
                 <div class="wrap-col">
                   <div class="lib-destination-col">{{ item.resource }}</div>
                   <div class="lib-resource-type-col">{{ item.resourceType }}</div>
@@ -144,6 +236,9 @@
               </ng-template>
             </div>
           </mat-list-item>
+          <div *ngIf="!filtredNotebookLibs.length && notebookLibs?.length" class="scrollingList info message">
+            <p>No matches found</p>
+          </div>
         </div>
         <div *ngIf="!notebookLibs?.length" class="scrollingList info message">
           <p>You have no libraries installed</p>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
index 3d033de..a6b7070 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
@@ -29,6 +29,13 @@
       justify-content: center;
       flex-direction: initial;
       margin-bottom: 25px;
+      bottom: 30px;
+      left: 0;
+      right: 0;
+      position: absolute;
+    }
+    strong {
+      font-weight: 600;
     }
   }
 }
@@ -53,16 +60,12 @@
 }
 
 .info {
-  height: 40%;
-  display: flex;
-  flex-direction: row;
-  align-items: center;
-  justify-content: center;
+  height: 30%;
 }
 
-
 .install-libraries {
   height: 100%;
+  padding-bottom: 130px;
 
   .dialog-header {
     padding-left: 25px;
@@ -87,6 +90,7 @@
       height: auto !important;
       min-height: 54px !important;
       border-bottom: 1px solid #edf1f5;
+      padding-right: 10px;
     }
   }
 
@@ -185,7 +189,7 @@
       width: 50px;
       text-align: center;
       font-size: 11px;
-      font-weight: 300;
+      font-weight: 600;
 
       i {
         position: absolute;
@@ -242,6 +246,18 @@
 }
 
 /* Tab info */
+.list-header {
+  line-height: 40px;
+}
+
+.list-header + div {
+  padding-left: 5px;
+}
+
+.mat-dialog-container {
+  position: relative;
+}
+
 .libs-info {
   padding: 0 20px;
   display: flex;
@@ -253,7 +269,7 @@
     width: 100%;
 
     .scrollingList {
-      max-height: 80%;
+      max-height: 300px;
       height: 80%;
       overflow-y: auto;
       overflow-x: hidden;
@@ -266,9 +282,15 @@
       }
     }
   }
+  .lib-col{
+    padding-left: 4px;
+  }
   .lib-name {
-    width: 30%;
+    width: 27%;
 
     strong {
       font-weight: 400;
@@ -277,41 +299,49 @@
 
   .lib-group,
   .lib-destination {
-    width: 20%;
+    width: 17%;
+    padding-left: 6px;
   }
 
   .lib-status {
-    width: 20%;
-    text-align: center;
+    width: 17%;
+    padding-left: 6px;
   }
 
   .lib-resource-type {
-    width: 10%;
+    width: 17%;
+    padding-left: 6px;
+  }
+  .lib-group-col {
+    width: 17%;
+    padding-left: 8px;
   }
 
   .st-group {
     display: flex;
     flex-direction: column;
-    width: 50%;
+    width: 51%;
 
     .wrap-col {
       display: flex;
       padding: 5px 0px;
 
       .lib-destination-col {
-        width: 40%;
+        width: 33.3%;
+        padding-left: 8px;
       }
 
       .lib-resource-type-col {
-        width: 20%;
+        width: 33.3%;
         color: #36afd5;
+        padding-left: 8px;
       }
 
       .lib-status-col {
-        text-align: center;
         position: relative;
-        width: 40%;
-        text-align: center;
+        width: 33.3%;
+        padding-left: 8px;
 
         .warn-action {
           position: absolute;
@@ -349,6 +379,78 @@
   background-color: #35afd5;
 }
 
+.dropdown-multiselect button span {
+  padding-top: 7px;
+  padding-bottom: 3px;
+}
+
+.mat-dialog-container.mat-dialog-container .install-libraries#dialog-box {
+  .mat-header-cell{
+    padding: 0;
+    border: none;
+  }
+
+  .filter-actions {
+    display: flex;
+    margin-left: 6px;
+    .btn {
+      padding: 6px;
+      height: auto;
+      width: auto;
+      min-width: 0;
+
+      .mat-button-wrapper {
+        display: flex;
+      }
+    }
+    .reset {
+      &:hover {
+        border-color: #f1696e;
+        background: #f9fafb;
+        color: #f1696e;
+      }
+    }
+    .apply:hover {
+      border-color: #49af38;
+      background: #f9fafb;
+      color: #49af38;
+    }
+  }
+}
+
+.install-libraries .dropdown-multiselect .list-menu li {
+  a {
+    font-size: 13px;
+    &.list-item {
+      color: #4a5c89 !important;
+    }
+  }
+}
+
+.filter-row .filter-actions {
+  display: flex;
+
+  .reset {
+    &:hover {
+      border-color: #f1696e;
+      background: #f9fafb;
+      color: #f1696e;
+    }
+  }
+  .apply:hover {
+    border-color: #49af38;
+    background: #f9fafb;
+    color: #49af38;
+  }
+}
+
+.error-message {
+  position: absolute;
+  left: 20%;
+  top: 40px;
+  font-size: 11px;
+  color: red;
+}
 
 @media screen and (min-width: 1281px) {
   .libs-info {
@@ -356,8 +458,21 @@
 
     .mat-list {
       .scrollingList {
-        max-height: 85%;
-        height: 85%;
+        max-height: 300px;
+        height: 80%;
+      }
+    }
+  }
+}
+
+@media screen and (max-height: 800px) {
+  .libs-info {
+    height: 50%;
+
+    .mat-list {
+      .scrollingList {
+        max-height: 140px;
+        height: 60%;
       }
     }
   }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
index 917fcbb..2f2d733 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
@@ -18,7 +18,7 @@
  */
 
 
-import { Component, OnInit, ViewChild, ViewEncapsulation, ChangeDetectorRef, Inject } from '@angular/core';
+import {Component, OnInit, ViewChild, ViewEncapsulation, ChangeDetectorRef, Inject, OnDestroy} from '@angular/core';
 import { MatDialog, MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog';
 import { FormControl } from '@angular/forms';
 import { ToastrService } from 'ngx-toastr';
@@ -27,6 +27,7 @@
 import { InstallLibrariesModel } from './install-libraries.model';
 import { LibrariesInstallationService } from '../../../core/services';
 import { SortUtils, HTTP_STATUS_CODES } from '../../../core/util';
+import {FilterLibsModel} from './filter-libs.model';
 
 
 @Component({
@@ -35,7 +36,7 @@
   styleUrls: ['./install-libraries.component.scss'],
   encapsulation: ViewEncapsulation.None
 })
-export class InstallLibrariesComponent implements OnInit {
+export class InstallLibrariesComponent implements OnInit, OnDestroy {
 
   public model: InstallLibrariesModel;
   public notebook: any;
@@ -43,6 +44,7 @@
   public groupsList: Array<string>;
   public notebookLibs: Array<any> = [];
   public notebookFailedLibs: Array<any> = [];
+  public loadLibsTimer: any;
 
   public query: string = '';
   public group: string;
@@ -66,7 +68,11 @@
 
   private readonly CHECK_GROUPS_TIMEOUT: number = 5000;
   private clear: number;
-  private clearCheckInstalling = undefined;
+
+  public filterConfiguration: FilterLibsModel = new FilterLibsModel('', [], [], [], []);
+  public filterModel: FilterLibsModel = new FilterLibsModel('', [], [], [], []);
+  public filtered: boolean;
+  public filtredNotebookLibs: Array<any> = [];
 
   @ViewChild('groupSelect', { static: false }) group_select;
   @ViewChild('resourceSelect', { static: false }) resource_select;
@@ -83,18 +89,26 @@
   }
 
   ngOnInit() {
-    this.libSearch.disable();
+    this.open(this.data);
+    this.uploadLibGroups();
     this.libSearch.valueChanges.pipe(
       debounceTime(1000))
       .subscribe(newValue => {
         this.query = newValue || '';
         this.filterList();
       });
-    this.open(this.data);
+    this.getInstalledLibsByResource();
+  }
+
+  ngOnDestroy() {
+    clearTimeout(this.loadLibsTimer);
+    clearTimeout(this.clear);
   }
 
   uploadLibGroups(): void {
-    this.librariesInstallationService.getGroupsList(this.notebook.name, this.model.computational_name)
+    this.libs_uploaded = false;
+    this.uploading = true;
+    this.librariesInstallationService.getGroupsList(this.notebook.project, this.notebook.name, this.model.computational_name)
       .subscribe(
         response => {
           this.libsUploadingStatus(response);
@@ -142,19 +156,21 @@
       this.group = $event.model.value;
     } else if ($event.model.type === 'destination') {
       this.resetDialog();
-
       this.destination = $event.model.value;
       this.destination && this.destination.type === 'СOMPUTATIONAL'
         ? this.model.computational_name = this.destination.name
         : this.model.computational_name = null;
 
-      this.libSearch.enable();
       this.uploadLibGroups();
       this.getInstalledLibsByResource();
     }
     this.filterList();
   }
 
+  public onFilterUpdate($event) {
+    this.filterModel[$event.type] = $event.model;
+  }
+
   public isDuplicated(item) {
     const select = { group: this.group, name: item.name, version: item.version };
 
@@ -172,10 +188,8 @@
 
   public selectLibrary(item): void {
     this.model.selectedLibs.push({ group: this.group, name: item.name, version: item.version });
-
     this.query = '';
     this.libSearch.setValue('');
-
     this.filteredList = null;
   }
 
@@ -185,6 +199,7 @@
 
   public open(notebook): void {
     this.notebook = notebook;
+    this.destination = this.getResourcesList()[0];
     this.model = new InstallLibrariesModel(notebook,
       response => {
         if (response.status === HTTP_STATUS_CODES.OK) {
@@ -199,25 +214,20 @@
         this.selectorsReset();
       },
       this.librariesInstallationService);
   }
 
   public showErrorMessage(item): void {
     const dialogRef: MatDialogRef<ErrorMessageDialogComponent> = this.dialog.open(
       ErrorMessageDialogComponent, { data: item.error, width: '550px', panelClass: 'error-modalbox' });
   }
 
-  public isInstallingInProgress(data): void {
-    this.notebookFailedLibs = data.filter(lib => lib.status.some(inner => inner.status === 'failed'));
-    this.installingInProgress = data.filter(lib => lib.status.some(inner => inner.status === 'installing')).length > 0;
-
-    if (this.installingInProgress || this.notebookFailedLibs.length) {
-      if (this.clearCheckInstalling === undefined)
-        this.clearCheckInstalling = window.setInterval(() => this.getInstalledLibrariesList(), 10000);
-    } else {
-      clearInterval(this.clearCheckInstalling);
-      this.clearCheckInstalling = undefined;
+  public isInstallingInProgress(): void {
+    this.installingInProgress = this.notebookLibs.some(lib => lib.filteredStatus.some(status => status.status === 'installing'));
+    if (this.installingInProgress) {
+      clearTimeout(this.loadLibsTimer);
+      this.loadLibsTimer = window.setTimeout(() => this.getInstalledLibrariesList(), 10000);
+    }
+  }
-  }
 
   public reinstallLibrary(item, lib) {
     const retry = [{ group: lib.group, name: lib.name, version: lib.version }];
@@ -232,14 +242,35 @@
   private getInstalledLibrariesList(init?: boolean) {
     this.model.getInstalledLibrariesList(this.notebook)
       .subscribe((data: any) => {
+        if (!this.filtredNotebookLibs.length || data.length !== this.notebookLibs.length) {
+          this.filtredNotebookLibs = [...data];
+        }
+        this.filtredNotebookLibs = data.filter(lib =>
+          this.filtredNotebookLibs.some(v =>
+            (v.name + v.version === lib.name + lib.version) && v.resource === lib.resource));
         this.notebookLibs = data ? data : [];
+        this.notebookLibs.forEach(lib => {
+          lib.filteredStatus = lib.status;
+          if (lib.version && lib.version !== 'N/A') {
+            lib.version = 'v.' + lib.version;
+          }
+        });
         this.changeDetector.markForCheck();
-        this.isInstallingInProgress(this.notebookLibs);
+        this.filterConfiguration.group = this.createFilterList(this.notebookLibs.map(v => this.groupsListMap[v.group]));
+        this.filterConfiguration.resource = this.createFilterList(this.notebookLibs.map(lib => lib.status.map(status => status.resource)));
+        this.filterConfiguration.resourceType = this.createFilterList(this.notebookLibs.map(lib =>
+          lib.status.map(status => status.resourceType)));
+        this.filterConfiguration.status = this.createFilterList(this.notebookLibs.map(lib => lib.status.map(status => status.status)));
+        this.isInstallingInProgress();
       });
   }
 
+  public createFilterList(array: Array<any>): Array<any> {
+    return array.flat().filter((v, i, arr) => arr.indexOf(v) === i);
+  }
+
   private getInstalledLibsByResource() {
-    this.librariesInstallationService.getInstalledLibsByResource(this.notebook.name, this.model.computational_name)
+    this.librariesInstallationService.getInstalledLibsByResource(this.notebook.project, this.notebook.name, this.model.computational_name)
       .subscribe((data: any) => this.destination.libs = data);
   }
 
@@ -277,9 +308,9 @@
   }
 
   private selectorsReset(): void {
-    this.resource_select && this.resource_select.setDefaultOptions(this.getResourcesList(),
-      'Select resource', 'destination', 'title', 'array');
-    this.group_select && this.group_select.setDefaultOptions([], '', 'group_lib', null, 'array');
+    this.destination = this.getResourcesList()[0];
+    this.uploadLibGroups();
+    this.getInstalledLibsByResource();
   }
 
   private resetDialog(): void {
@@ -291,15 +322,38 @@
     this.uploading = false;
     this.model.selectedLibs = [];
     this.filteredList = null;
-    this.destination = null;
     this.groupsList = [];
 
-    this.libSearch.disable();
     clearTimeout(this.clear);
-    clearInterval(this.clearCheckInstalling);
-    this.clearCheckInstalling = undefined;
+    clearTimeout(this.loadLibsTimer);
     this.selectorsReset();
   }
+
+  public toggleFilterRow(): void {
+    this.filtered = !this.filtered;
+  }
+
+  public filterLibs(): void {
+    this.filtredNotebookLibs = this.notebookLibs.filter((lib) => {
+      const isName = this.filterModel.name ?
+        lib.name.toLowerCase().indexOf(this.filterModel.name.toLowerCase().trim()) !== -1
+        || lib.version.indexOf(this.filterModel.name.toLowerCase().trim()) !== -1 : true;
+      const isGroup = this.filterModel.group.length ? this.filterModel.group.includes(this.groupsListMap[lib.group]) : true;
+      lib.filteredStatus = lib.status.filter(status => {
+        const isResource = this.filterModel.resource.length ? this.filterModel.resource.includes(status.resource) : true;
+        const isResourceType = this.filterModel.resourceType.length ? this.filterModel.resourceType.includes(status.resourceType) : true;
+        const isStatus = this.filterModel.status.length ? this.filterModel.status.includes(status.status) : true;
+        return isResource && isResourceType && isStatus;
+      });
+      return isName && isGroup && lib.filteredStatus.length;
+    });
+  }
+
+  public resetFilterConfigurations(): void {
+    this.notebookLibs.forEach(v => v.filteredStatus = v.status);
+    this.filtredNotebookLibs = [...this.notebookLibs];
+    this.filterModel.resetFilterLibs();
+  }
 }
 
 @Component({
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
index 6d0369e..b201904 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
@@ -56,7 +56,8 @@
   }
 
   public getLibrariesList(group: string, query: string): Observable<{}> {
-    let lib_query: any = {
+    const lib_query: any = {
+      project_name: this.notebook.project,
       exploratory_name: this.notebook.name,
       group: group,
       start_with: query
@@ -75,12 +76,13 @@
 
   public getInstalledLibrariesList(notebook): Observable<{}> {
     return this.librariesInstallationService.getInstalledLibrariesList(
-      notebook.name
+      notebook.project, notebook.name
     );
   }
 
   private installLibraries(retry?: Library, item?): Observable<{}> {
-    let lib_list: any = {
+    const lib_list: any = {
+      project_name: this.notebook.project,
       exploratory_name: this.notebook.name,
       libs: retry ? retry : this.selectedLibs
     };
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/NetworkSettings.java b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/notebook.model.ts
similarity index 65%
rename from integration-tests/src/main/java/com/epam/dlab/automation/docker/NetworkSettings.java
rename to services/self-service/src/main/resources/webapp/src/app/resources/exploratory/notebook.model.ts
index 295c217..3e96932 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/NetworkSettings.java
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/notebook.model.ts
@@ -16,21 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+/* tslint:disable:no-empty */
 
-package com.epam.dlab.automation.docker;
 
-import com.fasterxml.jackson.annotation.JsonProperty;
+import {Injectable} from '@angular/core';
 
-public class NetworkSettings {
+@Injectable()
 
-	@JsonProperty("Networks")
-	private Networks networks;
+export class NotebookModel {
+  constructor(
+    public project: string,
+    public exploratory: string,
+    public status: string,
+  ) { }
 
-    public Networks getNetworks() {
-		return networks;
+  public static notebook(data) {
+    if (data) {
+      return data.map(value => value.exploratory.map(el =>  new NotebookModel(
+        value.project,
+        el.name,
+        el.status,
+      )));
     }
-
-    public void setNetworks(Networks networks) {
-		this.networks = networks;
-    }
+  }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.html
index 6697e68..a6c2610 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.html
@@ -134,16 +134,15 @@
                 <div class="control">
                   <input type="password" formControlName="confirmPassword" placeholder="Enter Password">
                 </div>
-                <span class="danger_color"
-                  *ngIf="!updateAccountCredentialsForm.controls['confirmPassword'].valid && updateAccountCredentialsForm.controls['confirmPassword'].touched">
-                  Field is required. Please confirm a password
+                <span class="danger_color" *ngIf="updateAccountCredentialsForm.value.password !== updateAccountCredentialsForm.value.confirmPassword && updateAccountCredentialsForm.controls['confirmPassword'].touched && !!updateAccountCredentialsForm.value.password">
+                  Passwords don't match.
                 </span>
               </div>
             </div>
           </div>
           <div class="text-center submit m-bott-10">
             <button mat-raised-button type="button" class="butt action" (click)="resetForm()">Clear</button>
-            <button mat-raised-button type="button" [disabled]="!updateAccountCredentialsForm.valid"
+            <button mat-raised-button type="button" [disabled]="!updateAccountCredentialsForm.valid || updateAccountCredentialsForm.value.password !== updateAccountCredentialsForm.value.confirmPassword"
               (click)="assignChanges(updateAccountCredentialsForm.value)"
               class="butt butt-success action">Assign</button>
           </div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.ts
index 9d7ad75..aadfc24 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/manage-ungit/manage-ungit.component.ts
@@ -96,7 +96,7 @@
       'email': [item.email, Validators.compose([Validators.required, Validators.pattern(this.mail_validity_pattern)])],
       'login': [item.login, Validators.compose([Validators.required, Validators.pattern(this.login_acceptance_pattern)])],
       'password': ['', Validators.compose([Validators.required, Validators.minLength(6)])],
-      'confirmPassword': ['', Validators.compose([Validators.required, this.validConfirmField.bind(this)])]
+      'confirmPassword': ['', Validators.compose([Validators.required])]
     });
   }
 
@@ -140,7 +140,7 @@
       'email': ['', Validators.compose([Validators.required, Validators.pattern(this.mail_validity_pattern)])],
       'login': ['', Validators.compose([Validators.required, Validators.pattern(this.login_acceptance_pattern)])],
       'password': ['', Validators.compose([Validators.required, Validators.minLength(6)])],
-      'confirmPassword': ['', Validators.compose([Validators.required, this.validConfirmField.bind(this)])]
+      'confirmPassword': ['', Validators.compose([Validators.required])]
     });
   }
 
@@ -151,15 +151,6 @@
         error => this.toastr.error(error.message || 'Git credentials loading failed!', 'Oops!'));
   }
 
-  private validConfirmField(control) {
-    if (this.updateAccountCredentialsForm) {
-      const passReq = this.updateAccountCredentialsForm.get('password');
-      const confirmPassReq = this.updateAccountCredentialsForm.get('confirmPassword');
-
-      return passReq.value === confirmPassReq.value ? null : { valid: false };
-    }
-  }
-
   private containsHostname(control) {
     let duplication = null;
 
@@ -182,8 +173,8 @@
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </div>
   <div mat-dialog-content class="content">
-    <p>Account <strong>{{ data.hostname }}</strong> will be decommissioned.</p>
-    <p class="m-top-20"><strong>Do you want to proceed?</strong></p>
+    <p>Account <span class="strong">{{ data.hostname }}</span> will be decommissioned.</p>
+    <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
   </div>
   <div class="text-center">
     <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
index d030a3a..69edcc3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
@@ -39,7 +39,7 @@
     </ng-container> -->
 
     <ng-container matColumnDef="name" sticky>
-      <th mat-header-cell *matHeaderCellDef class="name-col">
+      <th mat-header-cell *matHeaderCellDef class="name-col label-header">
         <span class="label">Environment name</span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
@@ -50,7 +50,7 @@
       </th>
     </ng-container>
     <ng-container matColumnDef="statuses">
-      <th mat-header-cell *matHeaderCellDef class="status-col">
+      <th mat-header-cell *matHeaderCellDef class="status-col label-header">
         <span class="label"> Status </span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
@@ -61,8 +61,8 @@
       </th>
     </ng-container>
     <ng-container matColumnDef="shapes">
-      <th mat-header-cell *matHeaderCellDef class="shape-col">
-        <span class="label"> {{ DICTIONARY.instance_size }} </span>
+      <th mat-header-cell *matHeaderCellDef class="shape-col label-header">
+        <span class="label"> Size </span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filtering && filterForm.shapes.length > 0 && !collapseFilterRow">filter_list</span>
@@ -72,13 +72,13 @@
       </th>
     </ng-container>
     <ng-container matColumnDef="tag">
-      <th mat-header-cell *matHeaderCellDef class="tag-col">
+      <th mat-header-cell *matHeaderCellDef class="tag-col label-header">
         <span class="label"> Tags </span>
       </th>
     </ng-container>
     <ng-container matColumnDef="resources">
-      <th mat-header-cell *matHeaderCellDef class="resources-col">
-        <span class="label"> {{ DICTIONARY.computational_resource }} </span>
+      <th mat-header-cell *matHeaderCellDef class="resources-col label-header">
+        <span class="label"> Computational resources </span>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filtering && filterForm.resources.length > 0 && !collapseFilterRow">filter_list</span>
@@ -88,12 +88,14 @@
       </th>
     </ng-container>
     <ng-container matColumnDef="cost">
-      <th mat-header-cell *matHeaderCellDef class="cost-col">
+      <th mat-header-cell *matHeaderCellDef class="cost-col label-header">
         <span class="label"> Cost </span>
       </th>
     </ng-container>
     <ng-container matColumnDef="actions" stickyEnd>
-      <th mat-header-cell *matHeaderCellDef class="actions-col"></th>
+      <th mat-header-cell *matHeaderCellDef class="actions-col label-header">
+        <span class="label"> Actions </span>
+      </th>
     </ng-container>
 
     <!-- ----------------------------------------------------- -->
@@ -103,7 +105,9 @@
         [@detailExpand]="element == expandedElement ? 'expanded' : 'collapsed'" sticky>
 
         <tr *ngFor="let element of element.exploratory; let i = index" class="element-row mat-row">
-          <td class="name-col" (click)="printDetailEnvironmentModal(element)">{{ element.name }}</td>
+          <td class="name-col" (click)="printDetailEnvironmentModal(element)">
+            <span matTooltip="{{ element.name }}" matTooltipPosition="above">{{ element.name }}</span>
+          </td>
           <td class="status-col status" ngClass="{{ element.status.toLowerCase() || ''}}">
             {{element.status | underscoreless }}
           </td>
@@ -135,14 +139,16 @@
           <td *ngIf="healthStatus?.billingEnabled" class="cost-col">
             <span class="total_cost">{{ element.cost || 'N/A' }} {{ element.currency_code || '' }}</span>
             <span (click)="element.billing && printCostDetails(element)" class="currency_details"
-              [ngClass]="{ 'not-allowed' : !element.billing }">
+              [ngClass]="{ 'not-allowed' : !element.billing.report_lines.length }">
               <i class="material-icons">help_outline</i>
             </span>
           </td>
 
           <td class="settings">
             <span #settings (click)="actions.toggle($event, settings)" class="actions"
-              [ngClass]="{ 'disabled': element.status.toLowerCase() === 'creating' }">
+              [ngClass]="{ 'disabled': element.status.toLowerCase() === 'creating'
+              || (element.image === 'docker.dlab-superset' && element.status !== 'running' && element.status !== 'stopped')
+              || (element.image === 'docker.dlab-jupyterlab' && element.status !== 'running' && element.status !== 'stopped') }">
             </span>
 
             <bubble-up #actions class="list-menu" position="bottom-left" alternative="top-left">
@@ -152,7 +158,7 @@
                 && element.status !== 'terminated'
                 && element.status !== 'creating image'">
                   <li
-                    *ngIf="element.status !== 'stopped' && element.status !== 'stopping' && element.status !== 'starting' && element.status !== 'creating image'"
+                    *ngIf="element.status !== 'stopped' && element.status !== 'stopping' && element.status !== 'starting' && element.status !== 'creating image' && element.status.toLowerCase() !== 'reconfiguring'"
                     matTooltip="Unable to stop notebook because at least one computational resource is in progress"
                     matTooltipPosition="above" [matTooltipDisabled]="!isResourcesInProgress(element)">
                     <div (click)="exploratoryAction(element, 'stop')"
@@ -162,10 +168,12 @@
                     </div>
                   </li>
                   <li *ngIf="element.status.toLowerCase() === 'stopped' || element.status.toLowerCase() === 'stopping'"
-                    matTooltip="Unable to run notebook until it will be stopped" matTooltipPosition="above"
-                    [matTooltipDisabled]="!isResourcesInProgress(element) && element.status.toLowerCase() !== 'stopping'">
+                    matTooltip="{{element.edgeNodeStatus !== 'running' ? 'Unable to run notebook if edge node is not running.' : 'Unable to run notebook until it will be stopped.'}}" matTooltipPosition="above"
+                    [matTooltipDisabled]="!isResourcesInProgress(element) && element.status.toLowerCase() !== 'stopping' && element.edgeNodeStatus === 'running'"
+                    [ngClass]="{'not-allow': isResourcesInProgress(element) || element.status.toLowerCase() === 'stopping' || element.edgeNodeStatus !== 'running' }"
+                  >
                     <div (click)="exploratoryAction(element, 'run')"
-                      [ngClass]="{'not-allowed': isResourcesInProgress(element) || element.status.toLowerCase() === 'stopping' }">
+                      [ngClass]="{'not-allowed': isResourcesInProgress(element) || element.status.toLowerCase() === 'stopping' || element.edgeNodeStatus !== 'running' }">
                       <i class="material-icons">play_circle_outline</i>
                       <span>Run</span>
                     </div>
@@ -179,10 +187,8 @@
                       <span>Terminate</span>
                     </div>
                   </li>
-                  <li (click)="exploratoryAction(element, 'deploy')" *ngIf="element.status != 'stopping'
-                && element.status !== 'stopped'
-                && element.status !== 'starting'
-                && element.status !== 'creating image'">
+                  <li (click)="exploratoryAction(element, 'deploy')"
+                    *ngIf="element.status === 'running' && element.image !== 'docker.dlab-superset' && element.image !== 'docker.dlab-jupyterlab'">
                     <i class="material-icons">memory</i>
                     <span>Add compute</span>
                   </li>
@@ -193,11 +199,12 @@
                   </li>
                 </div>
                 <li (click)="exploratoryAction(element, 'ami')"
-                  *ngIf="element.status === 'running' && DICTIONARY.cloud_provider !== 'gcp'">
+                  *ngIf="element.status === 'running' && element.cloud_provider !== 'gcp' && element.image !== 'docker.dlab-superset' && element.image !== 'docker.dlab-jupyterlab'">
                   <i class="material-icons">view_module</i>
-                  <span>Create {{ DICTIONARY.image }}</span>
+                  <span>Create {{ DICTIONARY[element.cloud_provider].image }}</span>
                 </li>
-                <li (click)="exploratoryAction(element, 'install')">
+                <li (click)="exploratoryAction(element, 'install')"
+                  *ngIf="element.image !== 'docker.dlab-superset' && element.image !== 'docker.dlab-jupyterlab'">
                   <i class="material-icons">developer_board</i>
                   <span>Manage libraries</span>
                 </li>
@@ -217,37 +224,37 @@
 
     <!-- FILTER START -->
     <ng-container matColumnDef="name-filter" sticky>
-      <th mat-header-cell *matHeaderCellDef class="name-col">
+      <th mat-header-cell *matHeaderCellDef class="name-col filter-row-item">
         <input placeholder="Filter by environment name" type="text" class="form-control filter-field"
-          [value]="filterForm.name" (input)="filterForm.name = $event.target.value" />
+          [value]="filterForm.name" (input)="filterForm.name = $event.target['value']" />
       </th>
     </ng-container>
     <ng-container matColumnDef="status-filter">
-      <th mat-header-cell *matHeaderCellDef class="status-col">
+      <th mat-header-cell *matHeaderCellDef class="status-col filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'statuses'"
           [items]="filterConfiguration.statuses" [model]="filterForm.statuses"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="shape-filter">
-      <th mat-header-cell *matHeaderCellDef class="shape-col">
+      <th mat-header-cell *matHeaderCellDef class="shape-col filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)"
-          [type]="DICTIONARY.cloud_provider === 'aws' ? 'shapes': 'sizes'" [items]="filterConfiguration.shapes"
+          [type]="'sizes'" [items]="filterConfiguration.shapes"
           [model]="filterForm.shapes"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="tag-filter">
-      <th mat-header-cell *matHeaderCellDef class="tag-col">
+      <th mat-header-cell *matHeaderCellDef class="tag-col filter-row-item">
 
       </th>
     </ng-container>
     <ng-container matColumnDef="resource-filter">
-      <th mat-header-cell *matHeaderCellDef class="resources-col">
+      <th mat-header-cell *matHeaderCellDef class="resources-col filter-row-item">
         <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'resources'"
           [items]="filterConfiguration.resources" [model]="filterForm.resources"></multi-select-dropdown>
       </th>
     </ng-container>
     <ng-container matColumnDef="cost-filter">
-      <th mat-header-cell *matHeaderCellDef class="cost-col">
+      <th mat-header-cell *matHeaderCellDef class="cost-col filter-row-item">
 
       </th>
     </ng-container>
@@ -255,14 +262,13 @@
     <ng-container matColumnDef="action-filter" stickyEnd>
       <th mat-header-cell *matHeaderCellDef>
         <div class="actions">
-          <button mat-icon-button class="btn reset" (click)="resetFilterConfigurations()">
+          <button mat-icon-button class="btn reset" (click)="resetFilterConfigurations()" [disabled]="filteredEnvironments.length == 0 && !filtering">
             <i class="material-icons">close</i>
           </button>
 
           <button mat-icon-button class="btn apply" (click)="applyFilter_btnClick(filterForm)"
             [disabled]="filteredEnvironments.length == 0 && !filtering">
-            <i class="material-icons"
-              [ngClass]="{'not-allowed': filteredEnvironments.length == 0 && !filtering}">done</i>
+            <i class="material-icons">done</i>
           </button>
         </div>
       </th>
@@ -279,7 +285,7 @@
 
     <!-- FILTER END -->
 
-    <tr mat-header-row *matHeaderRowDef="displayedColumns" class="header-row"></tr>
+    <tr mat-header-row *matHeaderRowDef="displayedColumns; sticky: true" class="header-row"></tr>
 
     <tr [hidden]="!collapseFilterRow" mat-header-row *matHeaderRowDef="displayedFilterColumns; sticky: true"
       class="filter-row"></tr>
@@ -293,170 +299,3 @@
     <tr [hidden]="filteredEnvironments?.length" mat-footer-row *matFooterRowDef="['placeholder']"></tr>
   </table>
 </section>
-
-
-
-
-
-
-
-
-<!-- <table class=" dashboard_table data-grid">
-        <tr>
-          <th *ngFor="let column of filteringColumns" ngClass="{{column.className || ''}}"
-            [hidden]="column.name === 'cost' && !healthStatus?.billingEnabled">
-            {{column.title}}
-            <button mat-icon-button *ngIf="column.filtering" aria-label="More" class="ar" (click)="toggleFilterRow()">
-              <i class="material-icons">
-                <span *ngIf="filtering && filterForm[column.name].length > 0 && !collapseFilterRow">filter_list</span>
-                <span [hidden]="filtering && filterForm[column.name].length > 0 && !collapseFilterRow">more_vert</span>
-              </i>
-            </button>
-          </th>
-        </tr>
-
-
-
-
-        <tr *ngIf="filteredEnvironments && collapseFilterRow" class="filter-row">
-          <td>
-            <input placeholder="Filter by environment name" type="text" class="form-control filter-field"
-              [value]="filterForm.name" (input)="filterForm.name = $event.target.value" />
-          </td>
-          <td>
-            <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'statuses'"
-              [items]="filterConfiguration.statuses" [model]="filterForm.statuses"></multi-select-dropdown>
-          </td>
-          <td>
-            <multi-select-dropdown (selectionChange)="onUpdate($event)"
-              [type]="DICTIONARY.cloud_provider === 'aws' ? 'shapes': 'sizes'" [items]="filterConfiguration.shapes"
-              [model]="filterForm.shapes"></multi-select-dropdown>
-          </td>
-          <td>
-            <multi-select-dropdown (selectionChange)="onUpdate($event)" [type]="'resources'"
-              [items]="filterConfiguration.resources" [model]="filterForm.resources"></multi-select-dropdown>
-          </td>
-          <td *ngIf="healthStatus?.billingEnabled"></td>
-          <td>
-            <div class="actions">
-              <button mat-icon-button class="btn reset" (click)="resetFilterConfigurations()">
-                <i class="material-icons">close</i>
-              </button>
-
-              <button mat-icon-button class="btn apply" (click)="applyFilter_btnClick(filterForm)"
-                [disabled]="filteredEnvironments.length == 0 && !filtering">
-                <i class="material-icons"
-                  [ngClass]="{'not-allowed': filteredEnvironments.length == 0 && !filtering}">done</i>
-              </button>
-            </div>
-          </td>
-        </tr>
-
-        <tr *ngIf="(!filteredEnvironments) && !filtering || (filteredEnvironments.length == 0) && !filtering"
-          class="message_block">
-          <td [colSpan]="!healthStatus?.billingEnabled ? filteringColumns.length -1 : filteringColumns.length">To start
-            working, please, create new environment</td>
-        </tr>
-
-        <tr *ngIf="(filteredEnvironments.length == 0) && filtering" class="message_block">
-          <td [colSpan]="!healthStatus?.billingEnabled ? filteringColumns.length -1 : filteringColumns.length">No
-            matches
-            found</td>
-        </tr>
-
-
-
-
-
-        <tr *ngFor="let env of filteredEnvironments;" class="dashboard_table_body"
-          [ngClass]="{'dropdown-outscreen': isOutscreenDropdown}">
-          <td (click)="printDetailEnvironmentModal(env)">{{env.name}}</td>
-          <td class="status" ngClass="{{env.status.toLowerCase() || ''}}">{{ env.status | underscoreless }}</td>
-          <td>{{env.shape}}</td>
-          <td>
-            <computational-resources-list [resources]="env.resources" [environment]="env"
-              (buildGrid)="buildGrid($event)">
-            </computational-resources-list>
-          </td>
-          <td *ngIf="healthStatus?.billingEnabled">
-            <span class="total_cost">{{ env.cost || 'N/A' }} {{ env.currency_code || '' }}</span>
-            <span (click)="env.billing && printCostDetails(env)" class="currency_details"
-              [ngClass]="{ 'not-allowed' : !env.billing }">
-              <i class="material-icons">help_outline</i>
-            </span>
-          </td>
-
-
-
-          <td class="settings">
-            <span #settings (click)="actions.toggle($event, settings)" class="actions"
-              [ngClass]="{ 'disabled': env.status.toLowerCase() === 'creating' }">
-            </span>
-
-            <bubble-up #actions class="list-menu" position="bottom-left" alternative="top-left">
-              <ul class="list-unstyled">
-                <div class="active-items" *ngIf="env.status.toLowerCase() !== 'failed'
-                && env.status !== 'terminating'
-                && env.status !== 'terminated'
-                && env.status !== 'creating image'">
-                  <li
-                    *ngIf="env.status !== 'stopped' && env.status !== 'stopping' && env.status !== 'starting' && env.status !== 'creating image'"
-                    matTooltip="Unable to stop notebook because at least one computational resource is in progress"
-                    matTooltipPosition="above" [matTooltipDisabled]="!isResourcesInProgress(env)">
-                    <div (click)="exploratoryAction(env, 'stop')"
-                      [ngClass]="{'not-allowed': isResourcesInProgress(env) }">
-                      <i class="material-icons">pause_circle_outline</i>
-                      <span>Stop</span>
-                    </div>
-                  </li>
-                  <li *ngIf="env.status.toLowerCase() === 'stopped' || env.status.toLowerCase() === 'stopping'"
-                    matTooltip="Unable to run notebook until it will be stopped" matTooltipPosition="above"
-                    [matTooltipDisabled]="!isResourcesInProgress(env) && env.status.toLowerCase() !== 'stopping'">
-                    <div (click)="exploratoryAction(env, 'run')"
-                      [ngClass]="{'not-allowed': isResourcesInProgress(env) || env.status.toLowerCase() === 'stopping' }">
-                      <i class="material-icons">play_circle_outline</i>
-                      <span>Run</span>
-                    </div>
-                  </li>
-                  <li *ngIf="env.status.toLowerCase() === 'running' || env.status.toLowerCase() === 'stopped'"
-                    matTooltip="Unable to terminate notebook because at least one computational resource is in progress"
-                    matTooltipPosition="above" [matTooltipDisabled]="!isResourcesInProgress(env)">
-                    <div (click)="exploratoryAction(env, 'terminate')"
-                      [ngClass]="{'not-allowed': isResourcesInProgress(env) }">
-                      <i class="material-icons">phonelink_off</i>
-                      <span>Terminate</span>
-                    </div>
-                  </li>
-                  <li (click)="exploratoryAction(env, 'deploy')" *ngIf="env.status != 'stopping'
-                && env.status !== 'stopped'
-                && env.status !== 'starting'
-                && env.status !== 'creating image'">
-                    <i class="material-icons">memory</i>
-                    <span>Add compute</span>
-                  </li>
-                  <li (click)="exploratoryAction(env, 'schedule')" *ngIf="env.status.toLowerCase() === 'running'
-                || env.status.toLowerCase() === 'stopped'">
-                    <i class="material-icons">schedule</i>
-                    <span>Scheduler</span>
-                  </li>
-                </div>
-                <li (click)="exploratoryAction(env, 'ami')"
-                  *ngIf="env.status === 'running' && DICTIONARY.cloud_provider !== 'gcp'">
-                  <i class="material-icons">view_module</i>
-                  <span>Create {{ DICTIONARY.image }}</span>
-                </li>
-                <li (click)="exploratoryAction(env, 'install')">
-                  <i class="material-icons">developer_board</i>
-                  <span>Manage libraries</span>
-                </li>
-                <li *ngIf="env.status === 'running'">
-                  <a target="_blank" [attr.href]="'/#/terminal/' + env.private_ip" class="navigate">
-                    <i class="material-icons">laptop</i>
-                    <span>Open terminal</span>
-                  </a>
-                </li>
-              </ul>
-            </bubble-up>
-          </td>
-        </tr>
-  </table> -->
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
index 40902a6..d6c0556 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
@@ -25,6 +25,8 @@
   width: 100%;
 
   .header-row {
+    position: sticky;
+    background-clip: padding-box;
     th {
       padding: 5px;
     }
@@ -53,6 +55,13 @@
           padding-right: 5px;
           padding-left: 24px;
           cursor: pointer;
+          overflow: hidden;
+          text-overflow: ellipsis;
+        }
+
+        &.resources-col {
+          overflow: hidden;
+          text-overflow: ellipsis;
         }
       }
     }
@@ -70,10 +79,11 @@
 
   .filter-row {
     height: 0 !important;
+    background-clip: padding-box;
 
     th {
       padding: 5px;
-
+      background-clip: padding-box;
       &:last-child {
         padding-right: 6px;
       }
@@ -84,11 +94,18 @@
     width: 18%;
     padding-right: 5px;
     padding-left: 24px;
+    background-color: inherit;
+    .label {
+      padding-top: 14px;
+    }
   }
 
   .status-col,
   .shape-col {
     width: 14%;
+    .label {
+      padding-top: 14px;
+    }
   }
 
   .tag-col {
@@ -109,18 +126,24 @@
 
   .resources-col {
     width: 28%;
+    .label {
+      padding-top: 14px;
+    }
   }
 
   .cost-col {
     width: 10%;
     text-align: center;
-
   }
 
   .actions-col {
     width: 10%;
     padding-right: 24px;
     text-align: right;
+    background-color: inherit;
+    .label {
+      padding-right: 5px;
+    }
   }
 }
 
@@ -215,6 +238,8 @@
 
 .filter-row .actions {
   text-align: right;
+  display: flex;
+  justify-content: flex-end;
 }
 
 .filter-row .actions button {
@@ -361,3 +386,16 @@
   line-height: 42px;
   font-size: 18px;
 }
+
+.info {
+  padding: 40px;
+  text-align: center;
+}
+
+.content-row {
+  background-clip: padding-box;
+}
+
+.not-allow {
+  cursor: not-allowed !important;
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
index 14014e7..3a7aabd 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
@@ -18,7 +18,7 @@
  */
 /* tslint:disable:no-empty */
 
-import { Component, OnInit } from '@angular/core';
+import { Component, Input, OnInit } from '@angular/core';
 import { animate, state, style, transition, trigger } from '@angular/animations';
 import { ToastrService } from 'ngx-toastr';
 import { MatDialog } from '@angular/material/dialog';
@@ -39,6 +39,13 @@
 import { SchedulerComponent } from '../scheduler';
 
 import { DICTIONARY } from '../../../dictionary/global.dictionary';
+import { ProgressBarService } from '../../core/services/progress-bar.service';
+import { ComputationModel } from '../computational/computational-resource.model';
+import { NotebookModel } from '../exploratory/notebook.model';
 
 @Component({
   selector: 'resources-grid',
@@ -56,6 +63,8 @@
 export class ResourcesGridComponent implements OnInit {
   readonly DICTIONARY = DICTIONARY;
 
+  @Input() projects: Array<any>;
+
   environments: Exploratory[];
 
   collapseFilterRow: boolean = false;
@@ -71,9 +80,9 @@
   public filteringColumns: Array<any> = [
     { title: 'Environment name', name: 'name', class: 'name-col', filter_class: 'name-filter', filtering: true },
     { title: 'Status', name: 'statuses', class: 'status-col', filter_class: 'status-filter', filtering: true },
-    { title: DICTIONARY.instance_size, name: 'shapes', class: 'shape-col', filter_class: 'shape-filter', filtering: true },
+    { title: 'Instance size', name: 'shapes', class: 'shape-col', filter_class: 'shape-filter', filtering: true },
     { title: 'Tags', name: 'tag', class: 'tag-col', filter_class: 'tag-filter', filtering: false },
-    { title: DICTIONARY.computational_resource, name: 'resources', class: 'resources-col', filter_class: 'resource-filter', filtering: true },
+    { title: 'Computational resource', name: 'resources', class: 'resources-col', filter_class: 'resource-filter', filtering: true },
     { title: 'Cost', name: 'cost', class: 'cost-col', filter_class: 'cost-filter', filtering: false },
     { title: '', name: 'actions', class: 'actions-col', filter_class: 'action-filter', filtering: false }
   ];
@@ -85,7 +94,8 @@
   constructor(
     public toastr: ToastrService,
     private userResourceService: UserResourceService,
-    private dialog: MatDialog
+    private dialog: MatDialog,
+    private progressBarService: ProgressBarService,
   ) { }
 
   ngOnInit(): void {
@@ -93,15 +103,15 @@
   }
 
   public buildGrid(): void {
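+    // Kick off the progress bar on the next macrotask, after the current change-detection pass.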
+    setTimeout(() => { this.progressBarService.startProgressBar(); }, 0);
     this.userResourceService.getUserProvisionedResources()
       .subscribe((result: any) => {
-        this.filtering = false;
         this.environments = ExploratoryModel.loadEnvironments(result);
         this.getDefaultFilterConfiguration();
         (this.environments.length) ? this.getUserPreferences() : this.filteredEnvironments = [];
-
         this.healthStatus && !this.healthStatus.billingEnabled && this.modifyGrid();
-      });
+        this.progressBarService.stopProgressBar();
+      }, () => this.progressBarService.stopProgressBar());
   }
 
   public toggleFilterRow(): void {
@@ -123,15 +133,17 @@
     this.buildGrid();
   }
 
-  public containsNotebook(notebook_name: string): boolean {
-    if (notebook_name)
-      return this.environments
-        .filter(project => project.exploratory
-          .some(item => CheckUtils.delimitersFiltering(notebook_name) === CheckUtils.delimitersFiltering(item.name))).length > 0;
-  }
+  public containsNotebook(notebook_name: string, environmentNames: Array<string>): boolean {
+    if (notebook_name && environmentNames.length) {
+      return environmentNames
+        .some(item => CheckUtils.delimitersFiltering(notebook_name) === CheckUtils.delimitersFiltering(item));
+    }
+    return false;
+  }
 
   public isResourcesInProgress(notebook) {
-    const env = this.getEnvironmentsListCopy().map(env => env.exploratory.find(el => el.name === notebook.name))[0];
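+    // Resolve the notebook inside its project, then check whether any of its resources is still transitioning.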
+    const env = this.getResourceByName(notebook.name, notebook.project);
 
     if (env && env.resources.length) {
       return env.resources.filter(item => (item.status !== 'failed' && item.status !== 'terminated'
@@ -140,8 +152,71 @@
     return false;
   }
 
+  public filterActiveInstances(): FilterConfigurationModel {
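+    // Keep only active statuses and resources, scoped to the currently selected project.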
+    return (<FilterConfigurationModel | any>Object).assign({}, this.filterConfiguration, {
+      statuses: SortUtils.activeStatuses(),
+      resources: SortUtils.activeStatuses(),
+      type: 'active',
+      project: this.activeProject || ''
+    });
+  }
+
+  public resetFilterConfigurations(): void {
+    this.filterForm.resetConfigurations();
+    this.updateUserPreferences(this.filterForm);
+    this.buildGrid();
+  }
+
+  public printDetailEnvironmentModal(data): void {
+    this.dialog.open(DetailDialogComponent, { data: data, panelClass: 'modal-lg' })
+      .afterClosed().subscribe(() => this.buildGrid());
+  }
+
+  public printCostDetails(data): void {
+    this.dialog.open(CostDetailsDialogComponent, { data: data, panelClass: 'modal-xl' })
+      .afterClosed().subscribe(() => this.buildGrid());
+  }
+
+  public exploratoryAction(data, action: string) {
+    const resource = this.getResourceByName(data.name, data.project);
+
+    if (action === 'deploy') {
+      this.dialog.open(ComputationalResourceCreateDialogComponent, { data: { notebook: resource, full_list: this.environments }, panelClass: 'modal-xxl' })
+        .afterClosed().subscribe(() => this.buildGrid());
+    } else if (action === 'run') {
+      this.userResourceService
+        .runExploratoryEnvironment({ notebook_instance_name: data.name, project_name: data.project })
+        .subscribe(
+          () => this.buildGrid(),
+          error => this.toastr.error(error.message || 'Exploratory starting failed!', 'Oops!'));
+    } else if (action === 'stop') {
+      this.dialog.open(ConfirmationDialogComponent, { data: { notebook: data, type: ConfirmationDialogType.StopExploratory }, panelClass: 'modal-sm' })
+        .afterClosed().subscribe(() => this.buildGrid());
+    } else if (action === 'terminate') {
+      this.dialog.open(ConfirmationDialogComponent, { data:
+          { notebook: data, type: ConfirmationDialogType.TerminateExploratory }, panelClass: 'modal-sm' })
+        .afterClosed().subscribe(() => this.buildGrid());
+    } else if (action === 'install') {
+      this.dialog.open(InstallLibrariesComponent, { data: data, panelClass: 'modal-fullscreen' })
+        .afterClosed().subscribe(() => this.buildGrid());
+    } else if (action === 'schedule') {
+      this.dialog.open(SchedulerComponent, { data: { notebook: data, type: 'EXPLORATORY' }, panelClass: 'modal-xl-s' })
+        .afterClosed().subscribe(() => this.buildGrid());
+    } else if (action === 'ami') {
+      this.dialog.open(AmiCreateDialogComponent, { data: data, panelClass: 'modal-sm' })
+        .afterClosed().subscribe(() => this.buildGrid());
+    }
+  }
+
 
   // PRIVATE
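+  // Find an exploratory by name within the given project (undefined when absent).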
+  private getResourceByName(notebook_name: string, project_name: string) {
+    return this.getEnvironmentsListCopy()
+      .filter(environment => environment.project === project_name)
+      .map(env => env.exploratory.find(({ name }) => name === notebook_name))
+      .filter(el => !!el)[0];
+  }
+
   private getEnvironmentsListCopy() {
     return this.environments.map(env => JSON.parse(JSON.stringify(env)));
   }
@@ -165,6 +240,7 @@
   }
 
   private applyFilter_btnClick(config: FilterConfigurationModel) {
     let filteredData = this.getEnvironmentsListCopy();
 
     const containsStatus = (list, selectedItems) => {
@@ -203,33 +279,40 @@
       this.updateUserPreferences(config);
     }
 
+    let failedNotebooks = NotebookModel.notebook(this.getEnvironmentsListCopy());
+    failedNotebooks = SortUtils.flatDeep(failedNotebooks, 1).filter(notebook => notebook.status === 'failed');
+    if (this.filteredEnvironments.length && this.activeFiltering) {
+      let creatingNotebook = NotebookModel.notebook(this.filteredEnvironments);
+      creatingNotebook = SortUtils.flatDeep(creatingNotebook, 1).filter(resource => resource.status === 'creating');
+      const fail = failedNotebooks
+        .filter(v => creatingNotebook
+          .some(create => create.project === v.project && create.exploratory === v.exploratory && create.resource === v.resource));
+      if (fail.length) {
+        this.toastr.error('Creating notebook failed!', 'Oops!');
+      }
+    }
+
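+    // Repeat the same check for computational resources, which sit two levels deep.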
+    let failedResource = ComputationModel.computationRes(this.getEnvironmentsListCopy());
+    failedResource = SortUtils.flatDeep(failedResource, 2).filter(resource => resource.status === 'failed');
+    if (this.filteredEnvironments.length && this.activeFiltering) {
+      let creatingResource = ComputationModel.computationRes(this.filteredEnvironments);
+      creatingResource = SortUtils.flatDeep(creatingResource, 2).filter(resource => resource.status === 'creating');
+      const fail = failedResource
+        .filter(v => creatingResource
+          .some(create => create.project === v.project && create.exploratory === v.exploratory && create.resource === v.resource));
+      if (fail.length) {
+        this.toastr.error('Creating computation resource failed!', 'Oops!');
+      }
+    }
     this.filteredEnvironments = filteredData;
   }
 
-  private modifyGrid() {
+  private modifyGrid(): void {
     this.displayedColumns = this.displayedColumns.filter(el => el !== 'cost');
     this.displayedFilterColumns = this.displayedFilterColumns.filter(el => el !== 'cost-filter');
   }
 
-
-
-  filterActiveInstances(): FilterConfigurationModel {
-    const filteredData = (<any>Object).assign({}, this.filterConfiguration);
-    filteredData.project = this.activeProject || '';
-
-    for (const index in filteredData) {
-      if (filteredData[index] instanceof Array)
-        filteredData[index] = filteredData[index].filter((item: string) => {
-          return (item !== 'failed' && item !== 'terminated' && item !== 'terminating');
-        });
-      if (index === 'shapes') { filteredData[index] = []; }
-    }
-    filteredData.type = 'active';
-
-    return filteredData;
-  }
-
-  aliveStatuses(сonfig): void {
+  private aliveStatuses(сonfig): void {
     for (const index in this.filterConfiguration) {
       if (сonfig[index] && сonfig[index] instanceof Array)
         сonfig[index] = сonfig[index].filter(item => this.filterConfiguration[index].includes(item));
@@ -244,14 +327,7 @@
       if (filterConfig[index].length) this.activeFiltering = true;
   }
 
-  resetFilterConfigurations(): void {
-    this.filterForm.resetConfigurations();
-    this.updateUserPreferences(this.filterForm);
-    this.buildGrid();
-  }
-
-
-  getUserPreferences(): void {
+  private getUserPreferences(): void {
     this.userResourceService.getUserPreferences()
       .subscribe((result: FilterConfigurationModel) => {
         if (result) {
@@ -262,51 +338,13 @@
       }, () => this.applyFilter_btnClick(null));
   }
 
-  loadUserPreferences(config): FilterConfigurationModel {
+  private loadUserPreferences(config): FilterConfigurationModel {
     return new FilterConfigurationModel(config.name, config.statuses, config.shapes, config.resources, config.type, config.project);
   }
 
-  updateUserPreferences(filterConfiguration: FilterConfigurationModel): void {
+  private updateUserPreferences(filterConfiguration: FilterConfigurationModel): void {
     this.userResourceService.updateUserPreferences(filterConfiguration)
       .subscribe((result) => { },
         (error) => console.log('UPDATE USER PREFERENCES ERROR ', error));
   }
-
-  printDetailEnvironmentModal(data): void {
-    this.dialog.open(DetailDialogComponent, { data: data, panelClass: 'modal-lg' })
-      .afterClosed().subscribe(() => this.buildGrid());
-  }
-
-  printCostDetails(data): void {
-    this.dialog.open(CostDetailsDialogComponent, { data: data, panelClass: 'modal-xl' })
-      .afterClosed().subscribe(() => this.buildGrid());
-  }
-
-  exploratoryAction(data, action: string) {
-    if (action === 'deploy') {
-      this.dialog.open(ComputationalResourceCreateDialogComponent, { data: { notebook: data, full_list: this.environments }, panelClass: 'modal-xxl' })
-        .afterClosed().subscribe(() => this.buildGrid());
-    } else if (action === 'run') {
-      this.userResourceService
-        .runExploratoryEnvironment({ notebook_instance_name: data.name })
-        .subscribe(
-          () => this.buildGrid(),
-          error => this.toastr.error(error.message || 'Exploratory starting failed!', 'Oops!'));
-    } else if (action === 'stop') {
-      this.dialog.open(ConfirmationDialogComponent, { data: { notebook: data, type: ConfirmationDialogType.StopExploratory }, panelClass: 'modal-sm' })
-        .afterClosed().subscribe(() => this.buildGrid());
-    } else if (action === 'terminate') {
-      this.dialog.open(ConfirmationDialogComponent, { data: { notebook: data, type: ConfirmationDialogType.TerminateExploratory }, panelClass: 'modal-sm' })
-        .afterClosed().subscribe(() => this.buildGrid());
-    } else if (action === 'install') {
-      this.dialog.open(InstallLibrariesComponent, { data: data, panelClass: 'modal-fullscreen' })
-        .afterClosed().subscribe(() => this.buildGrid());
-    } else if (action === 'schedule') {
-      this.dialog.open(SchedulerComponent, { data: { notebook: data, type: 'EXPLORATORY' }, panelClass: 'modal-xl-s' })
-        .afterClosed().subscribe(() => this.buildGrid());
-    } else if (action === 'ami') {
-      this.dialog.open(AmiCreateDialogComponent, { data: data, panelClass: 'modal-sm' })
-        .afterClosed().subscribe(() => this.buildGrid());
-    }
-  }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.model.ts
index d9e4089..e769dbe 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.model.ts
@@ -22,6 +22,7 @@
   readonly DICTIONARY = DICTIONARY;
 
   constructor(
+    public cloud_provider: string,
     public name: Array<any>,
     public template_name: string,
     public image: string,
@@ -49,6 +50,7 @@
     public project: string,
     public endpoint: string,
     public tags: any,
+    public edgeNodeStatus: string
   ) { }
 
   public static loadEnvironments(data: Array<any>) {
@@ -56,35 +58,42 @@
       return data.map((value) => {
         return {
           project: value.project,
-          exploratory: value.exploratory.map(el => new ExploratoryModel(el.exploratory_name,
-            el.template_name,
-            el.image,
-            el.status,
-            el.shape,
-            el.computational_resources,
-            el.up_time,
-            el.exploratory_url,
-            value.shared[el.endpoint].edge_node_ip,
-            el.private_ip,
-            el.exploratory_user,
-            el.exploratory_pass,
-            value.shared[el.endpoint][DICTIONARY.bucket_name],
-            value.shared[el.endpoint][DICTIONARY.shared_bucket_name],
-            el.error_message,
-            el[DICTIONARY.billing.cost],
-            el[DICTIONARY.billing.currencyCode],
-            el.billing,
-            el.libs,
-            value.shared[el.endpoint][DICTIONARY.user_storage_account_name],
-            value.shared[el.endpoint][DICTIONARY.shared_storage_account_name],
-            value.shared[el.endpoint][DICTIONARY.datalake_name],
-            value.shared[el.endpoint][DICTIONARY.datalake_user_directory_name],
-            value.shared[el.endpoint][DICTIONARY.datalake_shared_directory_name],
-            el.project,
-            el.endpoint,
-            el.tags
-          ))
-        }
+          exploratory: value.exploratory.map(el => {
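+            // Dictionary keys vary per cloud provider; billing is matched to the notebook by name.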
+            const provider = el.cloud_provider.toLowerCase();
+            const billing = value.exploratoryBilling.filter(res => res.name === el.exploratory_name)[0];
+            return new ExploratoryModel(
+              provider,
+              el.exploratory_name,
+              el.template_name,
+              el.image,
+              el.status,
+              el.shape,
+              el.computational_resources,
+              el.up_time,
+              el.exploratory_url,
+              value.shared[el.endpoint].edge_node_ip,
+              el.private_ip,
+              el.exploratory_user,
+              el.exploratory_pass,
+              value.shared[el.endpoint][DICTIONARY[provider].bucket_name],
+              value.shared[el.endpoint][DICTIONARY[provider].shared_bucket_name],
+              el.error_message,
+              billing ? billing.total_cost : '',
+              billing ? billing.currency : '',
+              billing,
+              el.libs,
+              value.shared[el.endpoint][DICTIONARY[provider].user_storage_account_name],
+              value.shared[el.endpoint][DICTIONARY[provider].shared_storage_account_name],
+              value.shared[el.endpoint][DICTIONARY[provider].datalake_name],
+              value.shared[el.endpoint][DICTIONARY[provider].datalake_user_directory_name],
+              value.shared[el.endpoint][DICTIONARY[provider].datalake_shared_directory_name],
+              el.project,
+              el.endpoint,
+              el.tags,
+              value.shared[el.endpoint].status
+            );
+          })
+        };
       });
     }
   }
@@ -92,5 +101,5 @@
 
 export interface Exploratory {
   project: string;
-  exploratory: ExploratoryModel[]
+  exploratory: ExploratoryModel[];
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.ts
index bab05a7..7eb6ff6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.ts
@@ -17,14 +17,14 @@
  * under the License.
  */
 
-import { Component, OnInit, ViewChild, OnDestroy } from '@angular/core';
+import { Component, OnInit, ViewChild } from '@angular/core';
 import { ToastrService } from 'ngx-toastr';
 import { MatDialog } from '@angular/material/dialog';
 
 import { ResourcesGridComponent } from './resources-grid/resources-grid.component';
 import { ExploratoryEnvironmentCreateComponent } from './exploratory/create-environment';
 import { Exploratory } from './resources-grid/resources-grid.model';
-import { HealthStatusService, ProjectService } from '../core/services';
+import { ApplicationSecurityService, HealthStatusService } from '../core/services';
 import { ManageUngitComponent } from './manage-ungit/manage-ungit.component';
 import { Project } from './../administration/project/project.component';
 
@@ -45,12 +45,11 @@
     public toastr: ToastrService,
     private healthStatusService: HealthStatusService,
     private dialog: MatDialog,
-    private projectService: ProjectService
+    private applicationSecurityService: ApplicationSecurityService
   ) { }
 
   ngOnInit() {
     this.getEnvironmentHealthStatus();
-    this.getProjects();
     this.exploratoryEnvironments = this.resourcesGrid.environments;
   }
 
@@ -61,8 +60,7 @@
 
   public refreshGrid(): void {
     this.resourcesGrid.buildGrid();
-    this.getProjects();
-    this.getEnvironmentHealthStatus();
+    this.checkAuthorize();
     this.exploratoryEnvironments = this.resourcesGrid.environments;
   }
 
@@ -84,13 +82,15 @@
   }
 
   public getActiveProject() {
-    console.log('activeProject: ', this.resourcesGrid.activeProject);
 
     return this.resourcesGrid.activeProject;
   }
 
-  private getProjects() {
-    this.projectService.getUserProjectsList().subscribe((projects: any) => this.projects = projects);
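+  // Re-validate the session before refreshing the environment health status.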
+  private checkAuthorize() {
+    this.applicationSecurityService.isLoggedIn().subscribe(() => {
+      this.getEnvironmentHealthStatus();
+    });
   }
 
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/index.ts b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/index.ts
index 30dad18..9b3a482 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/index.ts
@@ -26,6 +26,7 @@
 import { FormControlsModule } from '../../shared/form-controls';
 import { SchedulerComponent } from './scheduler.component';
 import { TimePickerModule } from '../../shared/time-picker';
+import { KeysPipeModule } from '../../core/pipes';
 
 export * from './scheduler.component';
 export * from './scheduler.model';
@@ -38,10 +39,11 @@
     FormControlsModule,
     MaterialModule,
     BubbleModule,
-    TimePickerModule
+    TimePickerModule,
+    KeysPipeModule
   ],
   declarations: [SchedulerComponent],
   entryComponents: [SchedulerComponent],
   exports: [SchedulerComponent]
 })
-export class SchedulerModule {}
+export class SchedulerModule { }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.html
index bedb7ac..a13ffa1 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.html
@@ -57,8 +57,7 @@
             </span>
           </div>
         </div>
-        <div class="schedule-by-time" *ngIf="!enableIdleTimeView"
-          [ngClass]="{ hide: enableIdleTimeView, resource: destination.type === 'СOMPUTATIONAL',
+        <div class="schedule-by-time" *ngIf="!enableIdleTimeView" [ngClass]="{ hide: enableIdleTimeView, resource: destination.type === 'СOMPUTATIONAL',
                        des: destination.image === 'docker.dlab-dataengine-service' }">
           <div class="row-wrap" *ngIf="destination.image !== 'docker.dlab-dataengine-service'">
             <div class="col-3">
@@ -82,17 +81,18 @@
               <mat-form-field>
                 <input matInput [matDatepicker]="terminateDate" placeholder="Choose terminate date"
                   formControlName="terminateDate">
-                <mat-datepicker-toggle
-                  matSuffix [for]="terminateDate"></mat-datepicker-toggle>
+                <mat-datepicker-toggle matSuffix [for]="terminateDate"></mat-datepicker-toggle>
                 <mat-datepicker #terminateDate></mat-datepicker>
               </mat-form-field>
             </div>
             <ng-template #timezone>
               <div class="col-3">
                 <mat-form-field class="timezone-offset">
-                  <mat-select placeholder="Select offset" [(value)]="tzOffset"
+                  <mat-select placeholder="Select timezone" [(value)]="tzOffset"
                     [disabled]="destination.type === 'СOMPUTATIONAL' && inherit || !enableSchedule">
-                    <mat-option *ngFor="let zone of zones" [value]="zone">{{ zone }}</mat-option>
+                    <mat-option *ngFor="let zone of zones | keys" [value]="zone.key" matTooltip="{{ zone.value }}"
+                      matTooltipShowDelay="1000" matTooltipPosition="above"> GMT {{zone.key}} {{ zone.value }}
+                    </mat-option>
                   </mat-select>
                 </mat-form-field>
               </div>
@@ -121,12 +121,15 @@
             </div>
           </div>
 
-          <div class="row-wrap" *ngIf="destination.type === 'СOMPUTATIONAL' && destination.image !== 'docker.dlab-dataengine-service'">
+          <div class="row-wrap"
+            *ngIf="destination.type === 'СOMPUTATIONAL' && destination.image !== 'docker.dlab-dataengine-service'">
             <div class="col-3">
               <mat-form-field class="timezone-offset">
-                <mat-select placeholder="Select offset" [(value)]="tzOffset"
+                <mat-select placeholder="Select timezone" [(value)]="tzOffset"
                   [disabled]="destination.type === 'СOMPUTATIONAL' && inherit || !enableSchedule">
-                  <mat-option *ngFor="let zone of zones" [value]="zone">{{ zone }}</mat-option>
+                  <mat-option *ngFor="let zone of zones | keys" [value]="zone.key" matTooltip="{{ zone.value }}"
+                    matTooltipShowDelay="1000" matTooltipPosition="above"> GMT {{zone.key}} {{ zone.value }}
+                  </mat-option>
                 </mat-select>
               </mat-form-field>
             </div>
@@ -167,9 +170,11 @@
               </div>
               <div class="col-3">
                 <mat-form-field class="timezone-offset">
-                  <mat-select placeholder="Select offset" [(value)]="tzOffset"
+                  <mat-select placeholder="Select timezone" [(value)]="tzOffset"
                     [disabled]="destination.type === 'СOMPUTATIONAL' && inherit || !enableSchedule">
-                    <mat-option *ngFor="let zone of zones" [value]="zone">{{ zone }}</mat-option>
+                    <mat-option *ngFor="let zone of zones | keys" [value]="zone.key" matTooltip="{{ zone.value }}"
+                      matTooltipShowDelay="1000" matTooltipPosition="above"> GMT {{zone.key}} {{ zone.value }}
+                    </mat-option>
                   </mat-select>
                 </mat-form-field>
               </div>
@@ -179,7 +184,8 @@
             </div>
           </div>
 
-          <div class="options" *ngIf="destination && allowInheritView && destination.image !== 'docker.dlab-dataengine-service'">
+          <div class="options"
+            *ngIf="destination && allowInheritView && destination.image !== 'docker.dlab-dataengine-service'">
             <mat-slide-toggle labelPosition="after" [checked]="inherit" (change)="toggleInherit($event)"
               [disabled]="!enableSchedule || (!parentInherit && destination.type === 'СOMPUTATIONAL')">
               <span *ngIf="destination.type === 'EXPLORATORY'; else resourcePropagation" class="hold-label">
@@ -205,9 +211,10 @@
       </form>
       <div class="text-center m-top-30">
         <button mat-raised-button type="button" class="butt action" (click)="dialogRef.close()">Cancel</button>
-        <button mat-raised-button type="button" class="butt butt-success action" [disabled]="enableIdleTime && !schedulerForm.controls.inactivityTime.valid"
+        <button mat-raised-button type="button" class="butt butt-success action"
+          [disabled]="enableIdleTime && !schedulerForm.controls.inactivityTime.valid"
           (click)="scheduleInstance_btnClick()">Save</button>
       </div>
     </div>
   </div>
-</div>
\ No newline at end of file
+</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
index 9ca21d6..8b31445 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
@@ -25,11 +25,13 @@
 import * as _moment from 'moment';
 import 'moment-timezone';
 
+
 import { SchedulerService } from '../../core/services';
 import { SchedulerModel, WeekdaysModel } from './scheduler.model';
 import { SchedulerCalculations } from './scheduler.calculations';
 import { HTTP_STATUS_CODES, CheckUtils } from '../../core/util';
 import { ScheduleSchema } from './scheduler.model';
+import { map } from 'rxjs/operators';
 
 @Component({
   selector: 'dlab-scheduler',
@@ -38,6 +40,7 @@
   encapsulation: ViewEncapsulation.None
 })
 export class SchedulerComponent implements OnInit {
+
   readonly CheckUtils = CheckUtils;
 
   public model: SchedulerModel;
@@ -59,7 +62,7 @@
   public weekdays: string[] = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
   public schedulerForm: FormGroup;
   public destination: any;
-  public zones: Array<any> = [];
+  public zones: { [offset: string]: string } = {};
   public tzOffset: string = _moment().format('Z');
   public startTime = { hour: 9, minute: 0, meridiem: 'AM' };
   public endTime = { hour: 8, minute: 0, meridiem: 'PM' };
@@ -68,9 +71,7 @@
   public inactivityLimits = { min: 120, max: 10080 };
   public integerRegex: string = '^[0-9]*$';
 
-  // @ViewChild('bindDialog') bindDialog;
   @ViewChild('resourceSelect', { static: false }) resource_select;
-  // @Output() buildGrid: EventEmitter<{}> = new EventEmitter();
 
   constructor(
     @Inject(MAT_DIALOG_DATA) public data: any,
@@ -82,19 +83,18 @@
   ) { }
 
   ngOnInit() {
-    // this.bindDialog.onClosing = () => {
-    //   this.resetDialog();
-    //   this.buildGrid.emit();
-    // };
     this.open(this.data.notebook, this.data.type, this.data.resource);
   }
 
   public open(notebook, type, resource?): void {
     this.notebook = notebook;
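+    // Group IANA zone names by their current UTC offset, e.g. '+01:00' -> 'Europe/Berlin, Europe/Paris'.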
     this.zones = _moment.tz.names()
-      .map(el => _moment.tz(el).format('Z'))
-      .filter((item, pos, ar) => ar.indexOf(item) === pos)
-      .sort();
+      .map(item => [_moment.tz(item).format('Z'), item])
+      .sort()
+      .reduce((memo, item) => {
+        memo[item[0]] ? memo[item[0]] += `, ${item[1]}` : memo[item[0]] = item[1];
+        return memo;
+      }, {});
 
     this.model = new SchedulerModel(
       response => {
@@ -115,13 +115,12 @@
 
         if (this.destination.type === 'СOMPUTATIONAL') {
           this.allowInheritView = true;
-          this.getExploratorySchedule(this.notebook.name, this.destination.computational_name);
+          this.getExploratorySchedule(this.notebook.project, this.notebook.name, this.destination.computational_name);
           this.checkParentInherit();
         } else if (this.destination.type === 'EXPLORATORY') {
           this.allowInheritView = this.checkIsActiveSpark();
-          this.getExploratorySchedule(this.notebook.name);
+          this.getExploratorySchedule(this.notebook.project, this.notebook.name);
         }
-        // this.bindDialog.open(param);
       },
       this.schedulerService
     );
@@ -139,7 +138,7 @@
     this.inherit = $event.checked;
 
     if (this.destination.type === 'СOMPUTATIONAL' && this.inherit) {
-      this.getExploratorySchedule(this.notebook.name);
+      this.getExploratorySchedule(this.notebook.project, this.notebook.name);
       this.schedulerForm.get('startDate').disable();
     } else {
       this.schedulerForm.get('startDate').enable();
@@ -248,28 +247,24 @@
     };
 
     if (this.destination.type === 'СOMPUTATIONAL') {
-      this.model.confirmAction(this.notebook.name, parameters, this.destination.computational_name);
+      this.model.confirmAction(this.notebook.project, this.notebook.name, parameters, this.destination.computational_name);
     } else {
       parameters['consider_inactivity'] = this.considerInactivity;
-      this.model.confirmAction(this.notebook.name, parameters);
+      this.model.confirmAction(this.notebook.project, this.notebook.name, parameters);
     }
   }
 
   private setScheduleByInactivity() {
-    const data = {sync_start_required : this.parentInherit, check_inactivity_required: this.enableIdleTime, max_inactivity: this.schedulerForm.controls.inactivityTime.value };
+    const data = {
+      sync_start_required: this.parentInherit,
+      check_inactivity_required: this.enableIdleTime,
+      max_inactivity: this.schedulerForm.controls.inactivityTime.value
+    };
     (this.destination.type === 'СOMPUTATIONAL')
-      ? this.setInactivity(this.notebook.name, data, this.destination.computational_name)
-      : this.setInactivity(this.notebook.name, { ...data, consider_inactivity: this.considerInactivity });
+      ? this.setInactivity(this.notebook.project, this.notebook.name, data, this.destination.computational_name)
+      : this.setInactivity(this.notebook.project, this.notebook.name, { ...data, consider_inactivity: this.considerInactivity });
   }
 
-  // public close(): void {
-  //   if (this.bindDialog.isOpened) {
-  //     this.bindDialog.close();
-  //   }
-
-  //   this.resetDialog();
-  // }
-
   private formInit(start?: string, end?: string, terminate?: string) {
     this.schedulerForm = this.formBuilder.group({
       startDate: { disabled: this.inherit, value: start ? _moment(start).format() : null },
@@ -280,8 +275,8 @@
     });
   }
 
-  private getExploratorySchedule(resource, resource2?) {
-    this.schedulerService.getExploratorySchedule(resource, resource2).subscribe(
+  private getExploratorySchedule(project, resource, resource2?) {
+    this.schedulerService.getExploratorySchedule(project, resource, resource2).subscribe(
       (params: ScheduleSchema) => {
         if (params) {
           params.start_days_repeat.filter(key => (this.selectedStartWeekDays[key.toLowerCase()] = true));
@@ -310,7 +305,7 @@
   }
 
   private checkParentInherit() {
-    this.schedulerService.getExploratorySchedule(this.notebook.name)
+    this.schedulerService.getExploratorySchedule(this.notebook.project, this.notebook.name)
       .subscribe((res: any) => this.parentInherit = res.sync_start_required);
   }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
index f83be29..c0093ee 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
@@ -61,13 +61,13 @@
     if (this.continueWith) this.continueWith();
   }
 
-  private scheduleInstance(notebook, params, resourse) {
-    return this.schedulerService.setExploratorySchedule(notebook, params, resourse);
+  private scheduleInstance(project, notebook, params, resource) {
+    return this.schedulerService.setExploratorySchedule(project, notebook, params, resource);
   }
 
   public setInactivityTime(params) {
-    const [notebook, data, resource] = params;
-    return this.scheduleInstance(notebook, data, resource);
+    const [project, notebook, data, resource] = params;
+    return this.scheduleInstance(project, notebook, data, resource);
   }
 
   public resetSchedule(notebook, resourse) {
@@ -75,8 +75,8 @@
   }
 
   private prepareModel(fnProcessResults: any, fnProcessErrors: any): void {
-    this.confirmAction = (notebook, data, resourse?) =>
-      this.scheduleInstance(notebook, data, resourse).subscribe(
+    this.confirmAction = (project, notebook, data, resourse?) =>
+      this.scheduleInstance(project, notebook, data, resourse).subscribe(
         response => fnProcessResults(response),
         error => fnProcessErrors(error)
       );
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/bubble/bubble.component.css b/services/self-service/src/main/resources/webapp/src/app/shared/bubble/bubble.component.css
index 90c88aa..331f715 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/bubble/bubble.component.css
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/bubble/bubble.component.css
@@ -18,7 +18,7 @@
  */
 
 .bubble-up {
-  width: 100%;  
+  width: 100%;
   background: #fff;
   border: none;
   display: none;
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/dropdowns.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/dropdowns.component.scss
index d7ec247..a177f5c 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/dropdowns.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/dropdowns.component.scss
@@ -122,6 +122,14 @@
       }
     }
   }
+
+  &.resources {
+    .list-menu {
+      .list-item {
+        text-transform: capitalize;
+      }
+    }
+  }
 }
 
 .dropdown-list .list-menu a,
@@ -158,7 +166,7 @@
   color: #35afd5;
   display: block;
   padding: 0;
-  line-height: 40px;
+  line-height: 40px !important;
   text-align: center;
 }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
index bac0dd6..4ea5a14 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
@@ -25,6 +25,7 @@
 import { DirectivesModule } from '../../core/directives';
 import { KeysPipeModule, UnderscorelessPipeModule } from '../../core/pipes';
 import { BubbleModule } from '..';
+import { MultiLevelSelectDropdownComponent } from './multi-level-select-dropdown/multi-level-select-dropdown.component';
 
 export * from './multi-select-dropdown/multi-select-dropdown.component';
 export * from './dropdown-list/dropdown-list.component';
@@ -37,7 +38,7 @@
     UnderscorelessPipeModule,
     BubbleModule
   ],
-  declarations: [DropdownListComponent, MultiSelectDropdownComponent],
-  exports: [DropdownListComponent, MultiSelectDropdownComponent]
+  declarations: [DropdownListComponent, MultiSelectDropdownComponent, MultiLevelSelectDropdownComponent],
+  exports: [DropdownListComponent, MultiSelectDropdownComponent, MultiLevelSelectDropdownComponent]
 })
 export class FormControlsModule {}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html
new file mode 100644
index 0000000..420aa09
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html
@@ -0,0 +1,108 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing,
+  ~ software distributed under the License is distributed on an
+  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  ~ KIND, either express or implied.  See the License for the
+  ~ specific language governing permissions and limitations
+  ~ under the License.
+  -->
+
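+<!-- Multi-level checkbox tree: role category -> cloud -> individual role, with select-all helpers at each level. -->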
+<div class="dropdown-multiselect btn-group" ngClass="{{type || ''}}">
+  <button type="button" #list (click)="multiactions.toggle($event, list)">
+    <span class="ellipsis" *ngIf="model.length === 0">Select roles</span>
+    <span class="selected-items ellipsis" *ngIf="model.length !== 0">
+      {{selectedRolesList()}}
+    </span>
+    <span class="caret-btn"><i class="material-icons">keyboard_arrow_down</i></span>
+  </button>
+
+  <bubble-up #multiactions position="bottom" [keep-open]="true" class="mt-5">
+    <ul class="list-menu" id="scrolling">
+      <li class="filter-actions">
+        <a class="select_all" (click)="selectAllOptions($event)">
+          <i class="material-icons">playlist_add_check</i>&nbsp;All
+        </a>
+        <a class="deselect_all" (click)="deselectAllOptions($event)">
+          <i class="material-icons">clear</i>&nbsp;None
+        </a>
+      </li>
+
+        <ng-template ngFor let-item [ngForOf]="items" let-i="index">
+          <li class="role-label" role="presentation" *ngIf="i === 0 || model && item.type !== items[i - 1].type" (click)="toggleItemsForLable(item.type, $event)" >
+            <a href="#" class="list-item" role="menuitem">
+              <span class="arrow" [ngClass]="{'rotate-arrow': isOpenCategory[item.type], 'arrow-checked': selectedAllInCattegory(item.type) || selectedSomeInCattegory(item.type)}">
+                <i class="material-icons">keyboard_arrow_right</i>
+              </span>
+              <span class="empty-checkbox" [ngClass]="{'checked': selectedAllInCattegory(item.type) || selectedSomeInCattegory(item.type)}" (click)="toggleselectedCategory($event, model, item.type);$event.stopPropagation()" >
+                <span class="checked-checkbox" *ngIf="selectedAllInCattegory(item.type)"></span>
+                <span class="line-checkbox" *ngIf="selectedSomeInCattegory(item.type)"></span>
+              </span>
+              {{labels[item.type] || item.type | titlecase}}
+            </a>
+          </li>
+
+          <li class="role-item"
+              role="presentation"
+              *ngIf="model && isOpenCategory[item.type] && item.type !== 'COMPUTATIONAL_SHAPE' && item.type !== 'NOTEBOOK_SHAPE'"
+              [hidden]="!isAdmin && item.role === 'Allow to execute administration operation'"
+          >
+            <a href="#" class="list-item" role="menuitem" (click)="toggleSelectedOptions($event, model, item)">
+              <span class="empty-checkbox" [ngClass]="{'checked': checkInModel(item.role)}">
+                <span class="checked-checkbox" *ngIf="checkInModel(item.role)"></span>
+              </span>
+              {{item.role}}
+            </a>
+          </li>
+          <li class="role-item" role="presentation" (click)="toggleItemsForCloud(item.type + item.cloud, $event)"
+              *ngIf="model && isOpenCategory[item.type] && item.type === 'COMPUTATIONAL_SHAPE' && item.cloud !== items[i - 1].cloud
+              || model && isOpenCategory[item.type] && item.type === 'NOTEBOOK_SHAPE' && item.type !== items[i - 1].type
+              || model && isOpenCategory[item.type] && item.type === 'NOTEBOOK_SHAPE' && item.cloud !== items[i - 1].cloud
+              || model && isOpenCategory[item.type] && item.type === 'COMPUTATIONAL_SHAPE' && item.type !== items[i - 1].type"
+          >
+            <a href="#" class="list-item" role="menuitem">
+              <span class="arrow" [ngClass]="{'rotate-arrow': isCloudOpen[item.type + item.cloud], 'arrow-checked': selectedAllInCloud(item.type, item.cloud) || selectedSomeInCloud(item.type, item.cloud)}">
+                 <i class="material-icons">keyboard_arrow_right</i>
+              </span>
+              <span class="empty-checkbox"
+                    [ngClass]="{
+                    'checked': selectedAllInCloud(item.type, item.cloud)
+                    || selectedSomeInCloud(item.type, item.cloud)}"
+                    (click)="toggleSelectedCloud($event, model, item.type, item.cloud);
+                    $event.stopPropagation()"
+              >
+                <span class="checked-checkbox" *ngIf="selectedAllInCloud(item.type, item.cloud)"></span>
+                <span class="line-checkbox" *ngIf="selectedSomeInCloud(item.type, item.cloud)"></span>
+              </span>
+              {{item.cloud}}
+            </a>
+          </li>
+          <li class="role-cloud-item" role="presentation" *ngIf="model && isCloudOpen[item.type + item.cloud] && isOpenCategory[item.type]" >
+            <a href="#" class="list-item" role="menuitem" (click)="toggleSelectedOptions($event, model, item)">
+              <span class="empty-checkbox" [ngClass]="{'checked': checkInModel(item.role)}">
+                <span class="checked-checkbox" *ngIf="checkInModel(item.role)"></span>
+              </span>
+              {{item.role}}
+            </a>
+          </li>
+
+        </ng-template>
+
+      <li *ngIf="items?.length == 0">
+        <a role="menuitem" class="list-item">
+          <span class="material-icons">visibility_off</span>
+          No {{type}}
+        </a>
+      </li>
+    </ul>
+  </bubble-up>
+</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss
new file mode 100644
index 0000000..a066dd5
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss
@@ -0,0 +1,324 @@
+/*!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+.dropdown-list,
+.dropdown-multiselect {
+  width: 100%;
+  position: relative;
+}
+
+.dropdown-list button,
+.dropdown-multiselect button {
+  height: 38px;
+  width: 100%;
+  background: #fff;
+  padding-left: 15px;
+  font-size: 14px;
+  text-align: left;
+  white-space: nowrap;
+  cursor: pointer;
+  border-radius: 0;
+  border: none;
+  outline: none;
+  box-shadow: 0 3px 1px -2px rgba(0, 0, 0, .2), 0 2px 2px 0 rgba(0, 0, 0, .14), 0 1px 5px 0 rgba(0, 0, 0, .12);
+}
+
+.dropdown-list {
+  button {
+    line-height: 38px;
+
+    span {
+      color: #4a5c89;
+
+      em {
+        font-size: 13px;
+        color: #35afd5;
+        margin-right: 0;
+        font-style: normal;
+      }
+    }
+  }
+}
+
+.dropdown-list button:active,
+.dropdown-list button:focus,
+.dropdown-multiselect button:active,
+.dropdown-multiselect button:focus {
+  box-shadow: 0 5px 5px -3px rgba(0, 0, 0, .2), 0 8px 10px 1px rgba(0, 0, 0, .14), 0 3px 14px 2px rgba(0, 0, 0, .12);
+}
+
+.dropdown-multiselect {
+  button {
+    span {
+      color: #999;
+      font-weight: 300;
+      display: inline-block;
+      max-width: 80%;
+    }
+
+    .selected-items {
+      color: #4a5c89;
+      max-width: 477px;
+    }
+  }
+}
+
+.selected-items strong {
+  font-weight: 300;
+}
+
+.dropdown-list,
+.dropdown-multiselect {
+  .caret-btn {
+    position: absolute;
+    top: 0;
+    right: 0;
+    width: 40px;
+    height: 100%;
+    text-align: center;
+    padding: 7px;
+    -webkit-appearance: none;
+    -moz-appearance: none;
+    border-left: 1px solid #ececec;
+    background: #fff;
+    color: #36afd5 !important;
+  }
+
+  .list-menu {
+    width: 100%;
+    max-height: 450px;
+    left: 0;
+    padding: 0;
+    margin: 0;
+    overflow-y: auto;
+    overflow-x: hidden;
+
+    li {
+      padding: 0;
+      margin: 0;
+    }
+    .role-item {
+      padding-left: 30px;
+    }
+
+    .role-cloud-item {
+      padding-left: 60px;
+    }
+  }
+
+  &.statuses,
+  &.resources {
+    .list-menu {
+      .list-item {
+        text-transform: capitalize;
+      }
+    }
+  }
+}
+
+.dropdown-list .list-menu a,
+.dropdown-multiselect .list-menu li a {
+  display: block;
+  padding: 10px;
+  padding-left: 15px;
+  position: relative;
+  font-weight: 300;
+  cursor: pointer;
+  color: #4a5c89;
+  text-decoration: none;
+}
+
+.dropdown-multiselect .list-menu li a {
+  padding-left: 45px;
+  transition: all .45s ease-in-out;
+}
+
+.dropdown-list .list-menu a:hover,
+.dropdown-multiselect .list-menu a:hover {
+  background: #f7f7f7;
+  color: #35afd5;
+}
+
+.dropdown-multiselect .list-menu .filter-actions {
+  display: flex;
+  cursor: pointer;
+  border-bottom: 1px solid #ececec;
+}
+
+.dropdown-multiselect .list-menu .filter-actions a {
+  width: 50%;
+  color: #35afd5;
+  display: block;
+  padding: 0;
+  line-height: 40px !important;
+  text-align: center;
+}
+
+.dropdown-list {
+
+  .list-menu,
+  .title {
+    span {
+      display: flex;
+      justify-content: space-between;
+      align-items: center;
+      font-weight: 300;
+    }
+  }
+}
+
+.dropdown-list .list-menu li span.caption {
+  display: block;
+  padding: 10px 15px;
+  cursor: default;
+}
+
+.dropdown-list .list-menu li i,
+.dropdown-list .list-menu li strong {
+  display: inline-block;
+  width: 30px;
+  text-align: center;
+  vertical-align: middle;
+  color: #35afd5;
+  line-height: 26px;
+}
+
+.dropdown-list .list-menu li i {
+  vertical-align: sub;
+  font-size: 18px;
+}
+
+.dropdown-list .list-menu a {
+  padding: 12px;
+  padding-left: 15px;
+  position: relative;
+  font-weight: 300;
+  cursor: pointer;
+
+  em {
+    font-size: 13px;
+    color: #35afd5;
+    margin-right: 0;
+    font-style: normal;
+  }
+}
+
+.dropdown-list .list-menu a.empty {
+  height: 36px;
+}
+
+.dropdown-multiselect .list-menu .filter-actions i {
+  vertical-align: sub;
+  color: #35afd5;
+  font-size: 18px;
+  line-height: 26px;
+  transition: all .45s ease-in-out;
+}
+
+.dropdown-multiselect .list-menu .select_all:hover,
+.dropdown-multiselect .list-menu .select_all:hover i {
+  color: #4eaf3e !important;
+  background: #f9fafb;
+}
+
+.dropdown-multiselect .list-menu .deselect_all:hover,
+.dropdown-multiselect .list-menu .deselect_all:hover i {
+  color: #f1696e !important;
+  background: #f9fafb;
+}
+
+.dropdown-multiselect .list-menu a {
+  span {
+    position: absolute;
+    top: 10px;
+    left: 25px;
+    color: #35afd5;
+
+    &.checked-checkbox {
+      top: 0px;
+      left: 4px;
+      width: 5px;
+      height: 10px;
+      border-bottom: 2px solid white;
+      border-right: 2px solid white;
+      position: absolute;
+      transform: rotate(45deg);
+    }
+
+    &.line-checkbox {
+      top: 0px;
+      left: 2px;
+      width: 8px;
+      height: 7px;
+      border-bottom: 2px solid white;
+    }
+
+    &.arrow {
+      left: 2px;
+      top: 9px;
+
+      i {
+        color: lightgrey;
+      }
+
+      &.rotate-arrow {
+        transform: rotate(90deg);
+        transition: .1s ease-in-out;
+        top: 6px;
+        left: 0;
+      }
+
+      &.arrow-checked {
+        i {
+          color: #36afd5;
+        }
+      }
+    }
+  }
+}
+
+.dropdown-multiselect.btn-group.open .dropdown-toggle {
+  box-shadow: none;
+}
+
+.empty-checkbox {
+  width: 16px;
+  height: 16px;
+  border-radius: 2px;
+  border: 2px solid lightgrey;
+  margin-top: 2px;
+  position: relative;
+  &.checked {
+    border-color: #35afd5;
+    background-color: #35afd5;
+  }
+}
+
+.d-none {
+  display: none;
+}
+
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts
new file mode 100644
index 0000000..5b9c1a9
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { Input, Output, Component, EventEmitter } from '@angular/core';
+
+@Component({
+  selector: 'multi-level-select-dropdown',
+  templateUrl: 'multi-level-select-dropdown.component.html',
+  styleUrls: ['multi-level-select-dropdown.component.scss']
+})
+
+export class MultiLevelSelectDropdownComponent {
+
+  @Input() items: Array<any>;
+  @Input() model: Array<any>;
+  @Input() type: string;
+  @Input() isAdmin: boolean;
+  @Output() selectionChange: EventEmitter<{}> = new EventEmitter();
+
+  // Tracks which top-level categories are expanded, keyed by category type.
+  public isOpenCategory = {};
+
+  // Tracks which per-cloud sub-lists are expanded, keyed by category type + cloud name.
+  public isCloudOpen = {};
+
+  public labels = {
+    COMPUTATIONAL_SHAPE: 'Compute shapes',
+    NOTEBOOK_SHAPE: 'Notebook shapes',
+    COMPUTATIONAL: 'Compute'
+  };
+
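+  // Adds the clicked role to the selection model, or removes it if it is already selected, then emits the change.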
+  toggleSelectedOptions($event, model, value) {
+    $event.preventDefault();
+    const isSelected = model.some(v => v.role === value.role);
+    if (isSelected) {
+      this.model = model.filter(v => v.role !== value.role);
+    } else {
+      model.push(value);
+    }
+    this.onUpdate($event);
+  }
+
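+  // Selects every item of a category, or clears the whole category when everything in it is already selected.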
+  toggleselectedCategory($event, model, value) {
+    $event.preventDefault();
+    const categoryItems = this.items.filter(role => role.type === value);
+    if (this.selectedAllInCattegory(value)) {
+      this.model = this.model.filter(role => role.type !== value);
+    } else {
+      categoryItems.forEach(role => {
+        if (!model.some(mod => mod.role === role.role)) { this.model.push(role); }
+      });
+    }
+    this.onUpdate($event);
+  }
+
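+  // Selects every item of a category/cloud pair, or clears that pair when everything in it is already selected.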
+  toggleSelectedCloud($event, model, category, cloud) {
+    $event.preventDefault();
+    const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+    if (this.selectedAllInCloud(category, cloud)) {
+      this.model = this.model.filter(role => !(role.type === category && role.cloud === cloud));
+    } else {
+      categoryItems.forEach(role => {
+        if (!model.some(mod => mod.role === role.role)) { this.model.push(role); }
+      });
+    }
+    this.onUpdate($event);
+  }
+
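+  // Replaces the selection model with a copy of all available items.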
+  selectAllOptions($event) {
+    $event.preventDefault();
+    this.model = [...this.items];
+    this.onUpdate($event);
+  }
+
+  deselectAllOptions($event) {
+    this.model = [];
+    this.onUpdate($event);
+    $event.preventDefault();
+  }
+
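+  // Notifies the parent control about the current selection state.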
+  onUpdate($event): void {
+    this.selectionChange.emit({ model: this.model, type: this.type, $event });
+  }
+
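+  // Expands or collapses a category and collapses all of its per-cloud sub-lists.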
+  public toggleItemsForLable(label, $event) {
+    this.isOpenCategory[label] = !this.isOpenCategory[label];
+    this.isCloudOpen[label + 'AWS'] = false;
+    this.isCloudOpen[label + 'GCP'] = false;
+    this.isCloudOpen[label + 'AZURE'] = false;
+    $event.preventDefault();
+  }
+
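+  // Expands or collapses a single cloud sub-list, keyed by category type + cloud name.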
+  public toggleItemsForCloud(label, $event) {
+    this.isCloudOpen[label] = !this.isCloudOpen[label];
+    $event.preventDefault();
+  }
+
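+  // True when every item of the category is selected (note: this is also true for an empty category).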
+  public selectedAllInCattegory(category) {
+    const selected = this.model.filter(role => role.type === category);
+    const categoryItems = this.items.filter(role => role.type === category);
+    return selected.length === categoryItems.length;
+  }
+
+  public selectedSomeInCattegory(category) {
+    const selected = this.model.filter(role => role.type === category);
+    const categoryItems = this.items.filter(role => role.type === category);
+    return selected.length && selected.length !== categoryItems.length;
+  }
+
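+  // Same checks as above, scoped to a single category/cloud pair.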
+  public selectedAllInCloud(category, cloud) {
+    const selected = this.model.filter(role => role.type === category && role.cloud === cloud);
+    const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+    return selected.length === categoryItems.length;
+  }
+
+  public selectedSomeInCloud(category, cloud) {
+    const selected = this.model.filter(role => role.type === category && role.cloud === cloud);
+    const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+    return selected.length && selected.length !== categoryItems.length;
+  }
+
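+  // True when the given role name is present in the selection model.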
+  public checkInModel(item) {
+    return this.model.some(v => v.role === item);
+  }
+
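+  // Joins the selected role names into a comma-separated string.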
+  public selectedRolesList() {
+    return this.model.map(role => role.role).join(',');
+  }
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-select-dropdown/multi-select-dropdown.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-select-dropdown/multi-select-dropdown.component.html
index 4ce2029..60744c4 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-select-dropdown/multi-select-dropdown.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-select-dropdown/multi-select-dropdown.component.html
@@ -40,12 +40,13 @@
           <li role="presentation" *ngIf="model">
             <a href="#" class="list-item" role="menuitem" (click)="toggleSelectedOptions($event, model, item)">
               <span class="material-icons" *ngIf="model.indexOf(item) >= 0">done</span>
-              {{item}}
+              <ng-container *ngIf="type[0] !== 'resource_type'">{{item}}</ng-container>
+              <ng-container *ngIf="type[0] === 'resource_type'">{{item | titlecase}}</ng-container>
             </a>
           </li>
         </ng-template>
       <li *ngIf="items?.length == 0">
-        <a role="menuitem">
+        <a role="menuitem" class="list-item">
           <span class="material-icons">visibility_off</span>
           No {{type}}
         </a>
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/material.module.ts b/services/self-service/src/main/resources/webapp/src/app/shared/material.module.ts
index 46b0597..c677b05 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/material.module.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/material.module.ts
@@ -26,7 +26,7 @@
 import { MatCardModule } from '@angular/material/card';
 import { MatCheckboxModule } from '@angular/material/checkbox';
 import { MatChipsModule } from '@angular/material/chips';
-import { MatNativeDateModule } from '@angular/material/core';
+import { MAT_HAMMER_OPTIONS, MatNativeDateModule } from '@angular/material/core';
 import { MatDatepickerModule } from '@angular/material/datepicker';
 import { MatDialogModule } from '@angular/material/dialog';
 import { MatExpansionModule } from '@angular/material/expansion';
@@ -51,7 +51,7 @@
 import { MatTabsModule } from '@angular/material/tabs';
 import { MatToolbarModule } from '@angular/material/toolbar';
 import { MatTooltipModule } from '@angular/material/tooltip';
-import { DateAdapter } from '@angular/material/core';
+import { STEPPER_GLOBAL_OPTIONS } from '@angular/cdk/stepper';
 
 @NgModule({
   exports: [
@@ -87,7 +87,21 @@
     MatPaginatorModule,
     MatSortModule,
     MatTableModule
-  ]
+  ],
+  providers: [
+    {
+      provide: MAT_HAMMER_OPTIONS,
+      useValue: {
+        cssProps: {
+          userSelect: true
+        }
+      },
+    },
+    {
+      provide: STEPPER_GLOBAL_OPTIONS,
+      useValue: { displayDefaultIndicatorType: false }
+    }
+  ],
 })
 
 export class MaterialModule {}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts
index 701f03f..ca05251 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts
@@ -21,5 +21,6 @@
   StopExploratory = 0,
   TerminateExploratory = 1,
   TerminateComputationalResources = 2,
-  StopEdgeNode = 3
+  StopEdgeNode = 3,
+  deleteUser = 4,
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
index a19d773..b80443a 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
@@ -23,16 +23,23 @@
       <span *ngIf="model.notebook.name && model.notebook.name !== 'edge node'">
         <span>{{ confirmationType ? 'Terminate' : 'Stop' }} notebook: {{ model.notebook.name }}</span>
       </span>
-      <span *ngIf="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node'">
+      <span *ngIf="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node' || data.type === 4 && data.notebook.length">
         <i class="material-icons">priority_high</i>Warning
       </span>
+      <span *ngIf="data.type === 4 && !data.notebook.length">
+        Update group data
+      </span>
     </h4>
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </header>
   <div class="dialog-content">
     <div class="content-box">
-      <p class="info text-center">{{ model.title }}</p>
-
+      <p *ngIf="data.type !== 4" class="info text-center">{{ model.title }}</p>
+      <div *ngIf="data.type === 4" class="text-center m-bot-20">
+        <h3 class="strong">Group data will be updated.</h3>
+      </div>
+      <p *ngIf="data.type === 4 && data.notebook.length" class="text-center delete-user">User<span *ngIf="data.notebook.length>1">s</span>  <span class="strong"> {{data.notebook.join(', ')}} </span>will be deleted from this group.</p>
       <mat-list class="resources"
         [hidden]="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node'
                                   || !model.notebook.resources || model.notebook.resources.length === 0 || (!isAliveResources && !confirmationType) || onlyKilled">
@@ -48,18 +55,19 @@
             <div class="status" [ngClass]="{ 'stopped': !confirmationType && resource.image === 'docker.dlab-dataengine',
                   'terminated': resource.image === 'docker.dlab-dataengine-service' || confirmationType }">
               {{ (!confirmationType && resource.image === 'docker.dlab-dataengine') ? 'Stopped' : 'Terminated' }}</div>
-            <div class="size">{{ resource[DICTIONARY[resource.image].master_node_shape] }}</div>
+            <div class="size">{{ resource[DICTIONARY[notebook.cloud_provider.toLowerCase()][resource.image].master_node_shape] }} </div>
           </mat-list-item>
         </div>
       </mat-list>
 
       <div class="text-center m-top-20">
-        <p>Do you want to proceed?</p>
+        <p class="strong">Do you want to proceed?</p>
       </div>
       <div class="text-center m-top-20">
         <button mat-raised-button type="button" class="butt action" (click)="dialogRef.close()">No</button>
-        <button mat-raised-button type="button" class="butt butt-success action" (click)="confirm()">Yes</button>
+        <button *ngIf="data.type !== 4" mat-raised-button type="button" class="butt butt-success action" (click)="confirm()">Yes</button>
+        <button *ngIf="data.type === 4" mat-raised-button type="button" class="butt butt-success action" (click)="dialogRef.close(true)">Yes</button>
       </div>
     </div>
   </div>
-</div>
\ No newline at end of file
+</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
index ab3a434..c71e2ed 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
@@ -18,6 +18,9 @@
  */
 
 .confirmation-dialog {
+  h3 {
+    margin-bottom: 20px;
+  }
   color: #718ba6;
   p {
     font-size: 14px;
@@ -30,13 +33,13 @@
   }
   .resources {
     .cluster {
-      width: 45%;
+      width: 40%;
     }
     .status {
       width: 30%;
     }
     .size {
-      width: 25%;
+      width: 30%;
     }
     .scrolling-content {
       max-height: 200px;
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
index 5b05247..79b0512 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
@@ -57,10 +57,10 @@
   public isAliveResources(resources): boolean {
     if (resources) {
       for (let i = 0; i < resources.length; i++) {
-        if (resources[i].status.toLowerCase() != 'failed'
-          && resources[i].status.toLowerCase() != 'terminated'
-          && resources[i].status.toLowerCase() != 'terminating'
-          && resources[i].status.toLowerCase() != 'stopped')
+        if (resources[i].status.toLowerCase() !== 'failed'
+          && resources[i].status.toLowerCase() !== 'terminated'
+          && resources[i].status.toLowerCase() !== 'terminating'
+          && resources[i].status.toLowerCase() !== 'stopped')
           return true;
       }
     }
@@ -71,19 +71,19 @@
 
   private stopExploratory(): Observable<{}> {
     return this.manageAction
-      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.name)
+      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.project, this.notebook.name)
       : this.userResourceService.suspendExploratoryEnvironment(this.notebook, 'stop');
   }
 
   private terminateExploratory(): Observable<{}> {
-    return this.manageAction
-      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'terminate', this.notebook.name)
-      : this.userResourceService.suspendExploratoryEnvironment(this.notebook, 'terminate');
+    return this.manageAction
+      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'terminate', this.notebook.project, this.notebook.name)
+      : this.userResourceService.suspendExploratoryEnvironment(this.notebook, 'terminate');
   }
 
   private stopEdgeNode(): Observable<{}> {
     return this.manageAction
-      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', 'edge')
+      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.project, 'edge')
       : this.healthStatusService.suspendEdgeNode();
   }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/edge-action-dialog/edge-action-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/edge-action-dialog/edge-action-dialog.component.ts
new file mode 100644
index 0000000..cf74931
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/edge-action-dialog/edge-action-dialog.component.ts
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { Component, Inject, OnDestroy } from '@angular/core';
+import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog';
+
+
+@Component({
+  selector: 'edge-action-dialog',
+  template: `
+  <div id="dialog-box" *ngIf="data.type">
+    <header class="dialog-header">
+      <h4 class="modal-title"><span class="action">{{data.type | titlecase}}</span> edge node</h4>
+      <button type="button" class="close" (click)="this.dialogRef.close()">&times;</button>
+    </header>
+      <div mat-dialog-content class="content message mat-dialog-content">
+          <h3 class="strong">Select the edge nodes you want to {{data.type}}</h3>
+          <ul class="endpoint-list scrolling-content">
+            <li *ngIf="data.item.length>1" class="endpoint-list-item header-item">
+              <label class="strong all">
+                <input type="checkbox" [(ngModel)]="isAllChecked" (change)="chooseAll()">
+                {{data.type | titlecase}} all
+              </label>
+            </li>
+            <div class="scrolling-content" id="scrolling">
+            <li *ngFor="let endpoint of data.item" class="endpoint-list-item">
+                <label class="strong">
+                    <input type="checkbox" [(ngModel)]="endpoint.checked" (change)="endpointAction()">
+                    {{endpoint.name}}
+                </label>
+            </li>
+            </div>
+          </ul>
+
+      <p class="m-top-20 action-text"><span class="strong">Do you want to proceed?</span></p>
+
+      <div class="text-center m-top-30 m-bott-30">
+        <button type="button" class="butt" mat-raised-button (click)="this.dialogRef.close()">No</button>
+        <button type="button" class="butt butt-success" mat-raised-button (click)="dialogRef.close(endpointsNewStatus)" [disabled]="!endpointsNewStatus.length">Yes</button>
+      </div>
+      </div>
+  </div>
+  `,
+  styles: [`
+    .content { color: #718ba6; padding: 20px 50px; font-size: 14px; font-weight: 400; margin: 0; }
+    .info { color: #35afd5; }
+    .info .confirm-dialog { color: #607D8B; }
+    header { display: flex; justify-content: space-between; color: #607D8B; }
+    header h4 i { vertical-align: bottom; }
+    header a i { font-size: 20px; }
+    header a:hover i { color: #35afd5; cursor: pointer; }
+    .endpoint-list { text-align: left; margin-top: 30px; }
+    .endpoint-list-item { padding: 5px 20px; }
+    .action { text-transform: capitalize; }
+    .action-text { text-align: center; }
+    .scrolling-content { overflow-y: auto; max-height: 200px; }
+    label { font-size: 15px; font-weight: 300; font-family: "Open Sans", sans-serif; cursor: pointer; display: flex; align-items: center; padding-left: 10px; }
+    label input { margin-top: 2px; margin-right: 10px; cursor: pointer; }
+    .all { font-size: 16px; padding-left: 0; font-weight: 500; }
+  `]
+})
+
+export class EdgeActionDialogComponent implements OnDestroy {
+  public endpointsNewStatus: Array<object> = [];
+  public isAllChecked: boolean;
+  constructor(
+    public dialogRef: MatDialogRef<EdgeActionDialogComponent>,
+    @Inject(MAT_DIALOG_DATA) public data: any) {
+  }
+
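+  // Recomputes the list of checked endpoints and the state of the 'select all' checkbox.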
+  public endpointAction() {
+    this.endpointsNewStatus = this.data.item.filter(endpoint => endpoint.checked);
+    this.isAllChecked = this.endpointsNewStatus.length === this.data.item.length;
+  }
+
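+  // Checks or unchecks every endpoint so that the list follows the 'select all' checkbox.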
+  public chooseAll() {
+    if (this.isAllChecked) {
+      this.data.item.forEach(endpoint => endpoint.checked = true);
+    } else {
+      this.clearCheckedNodes();
+    }
+    this.endpointAction();
+  }
+
+  public clearCheckedNodes() {
+    this.data.item.forEach(endpoint => endpoint.checked = false);
+  }
+
+  ngOnDestroy(): void {
+    this.clearCheckedNodes();
+    this.isAllChecked = false;
+  }
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/edge-action-dialog/index.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/edge-action-dialog/index.ts
new file mode 100644
index 0000000..fd6e98a
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/edge-action-dialog/index.ts
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { NgModule } from '@angular/core';
+import { CommonModule } from '@angular/common';
+import { EdgeActionDialogComponent } from './edge-action-dialog.component';
+import { MaterialModule } from '../../material.module';
+import {FormsModule} from "@angular/forms";
+
+export * from './edge-action-dialog.component';
+
+@NgModule({
+  imports: [CommonModule, MaterialModule, FormsModule],
+  declarations: [EdgeActionDialogComponent],
+  entryComponents: [EdgeActionDialogComponent],
+  exports: [EdgeActionDialogComponent]
+})
+export class EdgeActionDialogModule {}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/index.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/index.ts
index 7f66ebc..501f2c6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/index.ts
@@ -22,11 +22,12 @@
 
 import { NotificationDialogComponent } from './notification-dialog.component';
 import { MaterialModule } from '../../material.module';
+import { FormsModule } from '@angular/forms';
 
 export * from './notification-dialog.component';
 
 @NgModule({
-  imports: [CommonModule, MaterialModule],
+  imports: [CommonModule, MaterialModule, FormsModule],
   declarations: [NotificationDialogComponent],
   entryComponents: [NotificationDialogComponent],
   exports: [NotificationDialogComponent]
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
index b0c7085..20ec20f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
@@ -19,49 +19,133 @@
 
 import { Component, Inject } from '@angular/core';
 import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog';
+import { Endpoint } from '../../../administration/project/project.component';
 
 @Component({
   selector: 'notification-dialog',
   template: `
-  <div id="dialog-box">
-    <header class="dialog-header">
-      <h4 class="modal-title"><i class="material-icons">priority_high</i>Warning</h4>
-      <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
-    </header>
-    <div mat-dialog-content class="content message">
-      <div *ngIf="data.type === 'list'" class="info">
-        <div *ngIf="data.template.notebook.length > 0">
-          Following notebook server<span *ngIf="data.template.notebook.length>1">s </span>
-          <span *ngFor="let item of data.template.notebook">
-            <b>{{ item.exploratory_name }}</b>
-            <span *ngIf="data.template.notebook.length > 1">, </span>
-          </span> will be stopped and all computational resources will be stopped/terminated
-        </div>
+      <div id="dialog-box">
+          <header class="dialog-header">
+              <h4 class="modal-title"><i class="material-icons">priority_high</i>Warning</h4>
+              <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
+          </header>
+          <div mat-dialog-content class="content message">
+            <div *ngIf="data.type === 'terminateNode'" class="table-header">
+              <div *ngIf="data.item.action.endpoint.length > 0">
+                Edge node<span *ngIf="data.item.action.endpoint.length > 1">s</span>
+                        <span class="strong">{{ ' ' + data.item.action.endpoint.join(', ') }}</span> in project
+                <span class="strong">{{ data.item.action.project_name }}</span> will be terminated.
+              </div>
+            </div>
+              <div *ngIf="data.type === 'list'" class="info">
+                  <div *ngIf="data.template.notebook.length > 0">
+                      Following notebook server<span *ngIf="data.template.notebook.length > 1">s </span>
+                      <span *ngFor="let item of data.template.notebook">
+                        <span class="strong">{{ item.exploratory_name }}</span>
+                        <span *ngIf="data.template.notebook.length > 1">, </span>
+                      </span> will be stopped and all computational resources will be stopped/terminated
+                  </div>
 
-        <div *ngIf="data.template.cluster.length > 0">
-          <p *ngFor="let item of data.template.cluster">
-              Computational resource<span *ngIf="data.template.cluster.length > 1">s </span>
-              <b>{{ item.computational_name }}</b> on <b>{{ item.exploratory_name }}</b>
-              will be stopped
-          </p>
-        </div>
-        <strong>by a schedule in 15 minutes.</strong>
+                  <div *ngIf="data.template.cluster.length > 0">
+                      <p *ngFor="let item of data.template.cluster">
+                          Computational resource<span *ngIf="data.template.cluster.length > 1">s </span>
+                          <span class="strong">{{ item.computational_name }}</span> on <span
+                              class="strong">{{ item.exploratory_name }}</span>
+                          will be stopped
+                      </p>
+                  </div>
+                  <span class="strong">by a schedule in 15 minutes.</span>
+              </div>
+              <div *ngIf="data.type === 'message'"><span [innerHTML]="data.template"></span></div>
+              <div *ngIf="data.type === 'confirmation'" class="confirm-dialog">
+                  <p *ngIf="data.template; else label">
+                      <span [innerHTML]="data.template"></span>
+                  </p>
+                  <ng-template #label>
+                      <p>
+            <span *ngIf="!!data.list">Endpoint</span>
+            <span *ngIf="data.action && data.action === 'decommissioned'">Project</span>
+            <span class="ellipsis strong" matTooltip="{{ data.item.name }}" matTooltipPosition="above"
+                  [matTooltipDisabled]="data.item.name.length > 35">
+             {{ data.item.name }}</span> will be {{ data.action || 'disconnected' }}.
+                      </p>
+                  </ng-template>
+
+                  <div *ngIf="data.list && data.list.length && data.type === 'confirmation'">
+                      <div class="resource-list">
+                          <div class="resource-list-header">
+                              <div class="resource-name">Resource</div>
+                              <div class="project">Project</div>
+                          </div>
+                          <div class="scrolling-content resource-heigth">
+                              <div class="resource-list-row sans node" *ngFor="let project of data.list">
+                                  <div class="resource-name ellipsis">
+                                      <div>Edge node</div>
+                                      <div *ngFor="let notebook of project.resource">{{notebook.exploratory_name}}</div>
+                                  </div>
+                                  <div class="project ellipsis">{{project.name}}</div>
+                              </div>
+                          </div>
+                      </div>
+<!--                      <div class="confirm-resource-terminating">-->
+<!--                          <label>-->
+<!--                              <input class="checkbox" type="checkbox"-->
+<!--                                     (change)="terminateResource()"/>Do not terminate all related resources-->
+<!--                          </label>-->
+<!--                      </div>-->
+                      <p class="confirm-message">
+                          <span *ngIf="!willNotTerminate">All connected computational resources will be terminated as well.</span>
+                      </p>
+                  </div>
+                  <mat-list *ngIf="data.item.endpoints?.length">
+                      <mat-list-item class="list-header sans">
+                          <div class="endpoint">Edge node in endpoint</div>
+                          <div class="status">Further status</div>
+                      </mat-list-item>
+                      <div class="scrolling-content">
+                          <mat-list-item *ngFor="let endpoint of filterEndpoints()" class="sans node">
+                              <div class="endpoint ellipsis">{{endpoint.name}}</div>
+                              <div class="status terminated">Terminated</div>
+                          </mat-list-item>
+                      </div>
+                  </mat-list>
+                <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
+                  <div class="text-center m-top-30 m-bott-10">
+                      <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
+                      <button *ngIf="!this.willNotTerminate" type="button" class="butt butt-success" mat-raised-button
+                              (click)="dialogRef.close('terminate')">Yes
+                      </button>
+                      <button *ngIf="this.willNotTerminate" type="button" class="butt butt-success" mat-raised-button
+                              (click)="dialogRef.close('noTerminate')">Yes
+                      </button>
+                  </div>
+              </div>
+               <div class="confirm-dialog" *ngIf="data.type === 'terminateNode'">
+                   <mat-list *ngIf="data.item.resources.length > 0">
+                     <mat-list-item class="list-header sans">
+                       <div class="endpoint">Resources</div>
+                       <div class="status">Further status</div>
+                     </mat-list-item>
+                     <div class="scrolling-content">
+                       <mat-list-item *ngFor="let resource of data.item.resources" class="sans node">
+                         <div class="endpoint ellipsis">{{resource}}</div>
+                         <div class="status terminated">Terminated</div>
+                       </mat-list-item>
+                     </div>
+                   </mat-list>
+                   <div mat-dialog-content class="bottom-message" *ngIf="data.item.resources.length > 0">
+                     <span class="confirm-message">All connected computational resources will be terminated as well.</span>
+                   </div>
+                 <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
+                 <div class="text-center m-top-30 m-bott-10">
+                   <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
+                   <button type="button" class="butt butt-success" mat-raised-button
+                           (click)="dialogRef.close(true)">Yes
+                   </button>
+                 </div>
+               </div>
+          </div>
       </div>
-      <div *ngIf="data.type === 'message'"><span [innerHTML]="data.template"></span></div>
-      <div *ngIf="data.type === 'confirmation'" class="confirm-dialog">
-        <p>
-          <strong class="ellipsis label-name" matTooltip="{{ data.item.name }}" matTooltipPosition="above" [matTooltipDisabled]="data.item.name.length > 35">
-          {{ data.item.name }}</strong> will be {{ data.action || 'decommissioned' }}.
-        </p>
-        <p class="m-top-20"><strong>Do you want to proceed?</strong></p>
-      
-        <div class="text-center m-top-30 m-bott-10">
-          <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
-          <button type="button" class="butt butt-success" mat-raised-button (click)="dialogRef.close(true)">Yes</button>
-        </div>
-      </div>
-    </div>
-  </div>
   `,
   styles: [`
     .content { color: #718ba6; padding: 20px 50px; font-size: 14px; font-weight: 400; margin: 0; }
@@ -72,15 +156,43 @@
     header a i { font-size: 20px; }
     header a:hover i { color: #35afd5; cursor: pointer; }
     .plur { font-style: normal; }
-    .label-name { display: inline-block; width: 100% }
+    .scrolling-content { overflow-y: auto; max-height: 200px; }
+    .endpoint { width: 70%; text-align: left; color: #577289; }
+    .status { width: 30%; text-align: left; }
+    .label { font-size: 15px; font-weight: 500; font-family: "Open Sans", sans-serif; }
+    .node { font-weight: 300; }
+    .resource-name { width: 280px; text-align: left; padding: 10px 0; line-height: 26px; }
+    .project { width: 30%; text-align: left; padding: 10px 0; line-height: 26px; }
+    .resource-list { max-width: 100%; margin: 0 auto; margin-top: 20px; }
+    .resource-list-header { display: flex; font-weight: 600; font-size: 16px; height: 48px; border-top: 1px solid #edf1f5; border-bottom: 1px solid #edf1f5; padding: 0 20px; }
+    .resource-list-row { display: flex; border-bottom: 1px solid #edf1f5; padding: 0 20px; }
+    .confirm-resource-terminating { text-align: left; padding: 10px 20px; }
+    .confirm-message { color: #ef5c4b; font-size: 13px; min-height: 18px; text-align: center; padding-top: 20px; }
+    .checkbox { margin-right: 5px; vertical-align: middle; margin-bottom: 3px; }
+    label { cursor: pointer; }
+    .bottom-message { padding-top: 15px; }
+    .table-header { padding-bottom: 10px; }
   `]
 })
 export class NotificationDialogComponent {
+  public willNotTerminate: boolean = false;
+  public notFailedEndpoints: Endpoint[];
   constructor(
     public dialogRef: MatDialogRef<NotificationDialogComponent>,
     @Inject(MAT_DIALOG_DATA) public data: any
   ) {
-    console.log(data);
+    if (this.data.list) {
+      this.willNotTerminate = !this.data.list.length;
+    }
+  }
 
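+  // Toggles the 'do not terminate related resources' flag; currently not triggered from the template.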
+  public terminateResource(): void {
+    this.willNotTerminate = !this.willNotTerminate;
+  }
+
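+  // Endpoints that are not already FAILED or TERMINATED, i.e. the ones shown with a 'Terminated' further status.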
+  public filterEndpoints() {
+    return this.data.item.endpoints.filter(e => e.status !== 'FAILED' && e.status !== 'TERMINATED');
   }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/index.ts b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/index.ts
index c5f19a4..7a6f2d4 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/index.ts
@@ -25,6 +25,7 @@
 
 import { NavbarComponent } from './navbar.component';
 import { NotificationDialogModule } from '../modal-dialog/notification-dialog';
+import { EdgeActionDialogModule } from '../modal-dialog/edge-action-dialog';
 
 export * from './navbar.component';
 
@@ -34,8 +35,9 @@
     RouterModule,
     MaterialModule,
     NotificationDialogModule,
+    EdgeActionDialogModule,
     ProgressDialogModule,
-    BubbleModule
+    BubbleModule,
   ],
   declarations: [NavbarComponent],
   exports: [NavbarComponent, RouterModule]
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html
index 57e9a8a..d8b2322 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html
@@ -18,6 +18,7 @@
   -->
 
 <div class="nav-bar" [hidden]="!isLoggedIn">
+  <mat-progress-bar *ngIf="showProgressBar" mode="indeterminate"></mat-progress-bar>
   <div class="menu-area" *ngIf="healthStatus">
 
     <button class="hamburger" (click)="collapse()">
@@ -65,39 +66,58 @@
 
 </div>
 <mat-sidenav-container class="example-container" autosize>
-  <mat-sidenav #drawer mode="side" opened role="navigation" [style.width]="isExpanded ? '220px' : '60px'">
+  <mat-sidenav #drawer mode="side" opened role="navigation" [style.width]="isExpanded ? '220px' : '60px'" disableClose>
     <mat-nav-list>
       <nav>
-        <a class="nav-item" [routerLink]="['/resources_list']" [routerLinkActive]="['active']"
-          [routerLinkActiveOptions]="{exact:true}">
-          <span *ngIf="isExpanded; else resources">List of Resources</span>
-          <ng-template #resources><i class="material-icons">dashboard</i></ng-template>
-        </a>
-        <a class="nav-item has-children" *ngIf="healthStatus?.admin">
-          <span *ngIf="isExpanded">Administration</span>
-
-          <a class="sub-nav-item" [style.margin-left.px]="isExpanded ? '30' : '0'" [routerLink]="['/roles']"
-            [routerLinkActive]="['active']" [routerLinkActiveOptions]="{exact:true}">
-            <span *ngIf="isExpanded; else roles">Roles</span>
-            <ng-template #roles><i class="material-icons">account_box</i></ng-template>
-          </a>
-          <a class="sub-nav-item" [style.margin-left.px]="isExpanded ? '30' : '0'" [routerLink]="['/projects']"
-            [routerLinkActive]="['active']" [routerLinkActiveOptions]="{exact:true}">
-            <span *ngIf="isExpanded; else projects">Projects</span>
-            <ng-template #projects><i class="material-icons">dns</i></ng-template>
-          </a>
-          <a class="sub-nav-item" [style.margin-left.px]="isExpanded ? '30' : '0'"
-            [routerLink]="['/environment_management']" [routerLinkActive]="['active']"
+        <div>
+          <a class="nav-item" [routerLink]="['/resources_list']" [routerLinkActive]="['active']"
             [routerLinkActiveOptions]="{exact:true}">
-            <span *ngIf="isExpanded; else env">Environment Management</span>
-            <ng-template #env><i class="material-icons">settings</i></ng-template>
+            <span *ngIf="isExpanded; else resources">List of Resources</span>
+            <ng-template #resources><i class="material-icons">dashboard</i></ng-template>
           </a>
-        </a>
-        <a *ngIf="healthStatus?.billingEnabled" class="nav-item" [routerLink]="['/billing_report']"
-          [routerLinkActive]="['active']" [routerLinkActiveOptions]="{exact:true}">
-          <span *ngIf="isExpanded; else billing">Billing Report</span>
-          <ng-template #billing><i class="material-icons">account_balance_wallet</i></ng-template>
-        </a>
+          <a class="nav-item has-children" *ngIf="healthStatus?.admin || healthStatus?.projectAdmin">
+            <span *ngIf="isExpanded">Administration</span>
+
+            <a class="sub-nav-item" [style.margin-left.px]="isExpanded ? '30' : '0'" [routerLink]="['/roles']"
+              [routerLinkActive]="['active']" [routerLinkActiveOptions]="{exact:true}">
+              <span *ngIf="isExpanded; else roles">Roles</span>
+              <ng-template #roles><i class="material-icons">account_box</i></ng-template>
+            </a>
+            <a class="sub-nav-item" [style.margin-left.px]="isExpanded ? '30' : '0'" [routerLink]="['/projects']"
+              [routerLinkActive]="['active']" [routerLinkActiveOptions]="{exact:true}">
+              <span *ngIf="isExpanded; else projects">Projects</span>
+              <ng-template #projects><i class="material-icons">dns</i></ng-template>
+            </a>
+            <a class="sub-nav-item" [style.margin-left.px]="isExpanded ? '30' : '0'"
+              [routerLink]="['/environment_management']" [routerLinkActive]="['active']"
+              [routerLinkActiveOptions]="{exact:true}">
+              <span *ngIf="isExpanded; else env">Environment Management</span>
+              <ng-template #env><i class="material-icons">settings</i></ng-template>
+            </a>
+          </a>
+          <a *ngIf="healthStatus?.billingEnabled" class="nav-item" [routerLink]="['/billing_report']"
+            [routerLinkActive]="['active']" [routerLinkActiveOptions]="{exact:true}">
+            <span *ngIf="isExpanded; else billing">Billing Report</span>
+            <ng-template #billing><i class="material-icons">account_balance_wallet</i></ng-template>
+          </a>
+        </div>
+<!--        <div>-->
+<!--          <a class="nav-item" [routerLink]="['/swagger']" [routerLinkActive]="['active']"-->
+<!--            [routerLinkActiveOptions]="{exact:true}">-->
+<!--            <span *ngIf="isExpanded; else endpoint">Cloud Endpoint API</span>-->
+<!--            <ng-template #endpoint>-->
+<!--              <span>-->
+<!--                <svg width="30px" height="27px" viewBox="0 0 256 256" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="xMidYMid">-->
+<!--                  <g>-->
+<!--                    <path d="M127.059657,255.996921 C58.8506544,255.526472 -0.457073619,198.918442 0.00265506057,126.998303 C0.444649399,57.7958628 57.9516598,-0.468967577 129.11002,0.00284555012 C198.267128,0.462386081 256.613109,57.8667711 255.995136,128.194199 C256.568091,197.883453 197.934268,256.489189 127.059657,255.996921 Z M127.059657,255.996921 C58.8506544,255.526472 -0.457073619,198.918442 0.00265506057,126.998303 C0.444649399,57.7958628 57.9516598,-0.468967577 129.11002,0.00284555012 C198.267128,0.462386081 256.613109,57.8667711 255.995136,128.194199 C256.568091,197.883453 197.934268,256.489189 127.059657,255.996921 Z" fill="#FFFFFF"></path>-->
+<!--                    <path id="swager-bgr" d="M127.184644,238.997327 C68.0323765,238.589271 16.6036091,189.498744 17.0023028,127.131428 C17.3860285,67.1185953 67.2554,16.5917106 128.963117,17.0024872 C188.934544,17.4010221 239.531905,67.1825241 238.995778,128.169251 C239.492444,188.602381 188.64743,239.424426 127.184644,238.997327 Z M127.184644,238.997327 C68.0323765,238.589271 16.6036091,189.498744 17.0023028,127.131428 C17.3860285,67.1185953 67.2554,16.5917106 128.963117,17.0024872 C188.934544,17.4010221 239.531905,67.1825241 238.995778,128.169251 C239.492444,188.602381 188.64743,239.424426 127.184644,238.997327 Z" fill="#577289"></path>-->
+<!--                    <path d="M169.327319,127.956161 C169.042723,133.246373 164.421106,137.639224 159.866213,136.872586 C159.844426,136.872586 159.821277,136.872586 159.798128,136.872586 C154.753021,136.879395 150.658383,132.794288 150.652936,127.749182 C150.824511,122.690458 155.019915,118.703395 160.08,118.789182 C165.125106,118.813692 169.59966,123.077182 169.327319,127.956161 Z M88.2011915,179.220161 C90.1034894,179.27599 92.0071489,179.235139 94.2008511,179.235139 L94.2008511,193.021012 C80.5661277,195.326373 69.3348085,191.455054 66.5787234,179.929607 C65.6350638,175.69199 65.0549787,171.380841 64.8425532,167.04382 C64.5497872,162.452161 65.0563404,157.808756 64.706383,153.225267 C63.7368511,140.613182 62.1028085,136.30748 50,135.711054 L50,120.014714 C50.8674043,119.81182 51.7470638,119.662033 52.6321702,119.562629 C59.2677447,119.23582 62.0646809,117.201437 63.5489362,110.665267 C64.2243404,106.992756 64.6246809,103.275309 64.7431489,99.5428839 C65.268766,92.3258627 65.0822128,84.991735 66.2845957,77.8918201 C68.0221277,67.6245861 74.3962553,62.6366712 84.9249362,62.0783733 C87.9206809,61.9176925 90.9259574,62.0538627 94.3206809,62.0538627 L94.3206809,76.1447563 C92.9235745,76.2441605 91.6435745,76.4470542 90.3717447,76.4089265 C81.7916596,76.146118 81.3477447,79.0683308 80.7213617,86.1709691 C80.3305532,90.6250967 80.8697872,95.1554797 80.5661277,99.6245861 C80.2488511,104.071905 79.6537872,108.496075 78.7850213,112.869863 C77.547234,119.208586 73.6500426,123.922799 68.2495319,127.92348 C78.7332766,134.745607 79.9261277,145.346458 80.6069787,156.110714 C80.9732766,161.895224 80.8057872,167.720586 81.3926809,173.476501 C81.8502128,177.944246 83.5877447,179.08399 88.2011915,179.220161 Z M97.0372766,118.789182 C97.0917447,118.789182 97.1448511,118.789182 97.1993191,118.789182 C102.211745,118.872246 106.209702,123.002288 106.126638,128.016075 C106.126638,128.180841 106.121191,128.344246 106.11166,128.50765 C105.829787,133.407054 101.630298,137.149012 96.7308936,136.867139 C96.5334468,136.871224 96.3373617,136.867139 96.1399149,136.857607 C91.1506383,136.609778 87.3065532,132.36399 87.554383,127.374714 C87.8022128,122.385437 92.048,118.541352 97.0372766,118.789182 Z M128.273362,118.789182 C133.755574,118.746969 137.396766,122.29965 137.425362,127.719224 C137.455319,133.284501 134.003404,136.845352 128.556596,136.868501 C123.017191,136.893012 119.370553,133.389352 119.340596,128.002458 C119.324255,127.727395 119.32017,127.452331 119.32834,127.177267 C119.482213,122.390884 123.486979,118.635309 128.273362,118.789182 Z M193.673191,111.92348 C195.131574,117.370288 197.970723,119.284841 203.704851,119.546288 C204.644426,119.589863 205.579915,119.749182 206.868085,119.892161 L206.868085,135.584416 C206.170894,135.813182 205.456,135.984756 204.730213,136.096416 C197.046128,136.574373 193.54383,139.726714 192.76766,147.431224 C192.272,152.349692 192.312851,157.322629 191.972426,162.258799 C191.829447,167.678373 191.336511,173.082969 190.49634,178.438544 C188.535489,188.142033 182.477277,192.982884 172.467404,193.573863 C169.245617,193.764501 166.000681,193.60382 162.526979,193.60382 L162.526979,179.578288 C164.396596,179.462544 166.046979,179.303224 167.701447,179.263735 C173.682043,179.120756 175.796766,177.192586 176.089532,171.252841 C176.413617,164.727565 176.555234,158.194118 176.846638,151.66748 C177.270128,142.233607 179.853277,133.806033 188.641702,127.922118 C183.612936,124.336756 179.575489,119.994288 178.529702,114.138969 C177.264681,107.041778 176.85617,99.7879903 
176.175319,92.5913946 C175.838979,88.9937776 175.855319,85.3648414 175.504,81.7699478 C175.125447,77.8890967 172.459234,76.5464584 168.926979,76.4593095 C166.903489,76.4102882 164.87183,76.4497776 162.284596,76.4497776 L162.284596,62.7537776 C178.793872,60.0126712 190.198128,65.5057776 191.257532,81.3015222 C191.701447,87.9343733 191.636085,94.5985435 192.060936,101.231395 C192.247489,104.839905 192.786723,108.421182 193.673191,111.92348 Z" fill="#ffffff"></path>-->
+<!--                  </g>-->
+<!--                </svg>-->
+<!--              </span>-->
+<!--            </ng-template>-->
+<!--          </a>-->
+<!--        </div>-->
       </nav>
     </mat-nav-list>
   </mat-sidenav>
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.scss
index 49b8ffc..fd1e8e9 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.scss
@@ -168,6 +168,10 @@
   color: #577289 !important;
   outline: none;
 
+  span {
+    vertical-align: middle;
+  }
+
   i {
     vertical-align: middle;
     padding-left: 8px;
@@ -178,6 +182,9 @@
     background: none !important;
     color: #36afd5 !important;
   }
+  &:not(.has-children):hover #swager-bgr{
+    fill: #36afd5;
+  }
 
   &:not(:last-child)::after {
     content: ' ';
@@ -192,6 +199,9 @@
 
   &.active {
     color: #36afd5 !important;
+    svg #swager-bgr {
+      fill: #36afd5;
+    }
   }
 
   &.has-children {
@@ -286,15 +296,33 @@
   transform: translateY(-4px) translateX(6px) rotate(-45deg);
 }
 
+mat-sidenav {
+
+  mat-nav-list {
+    height: 100%;
+
+    nav {
+      height: 100%;
+      display: flex;
+      justify-content: space-between;
+      flex-direction: column;
+
+      svg {
+        width: 37px;
+        padding: 0 2px 0 5px;
+        vertical-align: middle;
+      }
+    }
+  }
+}
+
 mat-sidenav-content {
   &.mat-drawer-content {
     transition: all 0.35s ease-out;
-
   }
 
   .fade-animation {
     display: block;
-    height: 100%;
     max-height: 100%;
   }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.ts
index 7c61100..1f33caa 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.ts
@@ -19,7 +19,7 @@
 
 import { Component, ViewEncapsulation, OnInit, OnDestroy, ViewChild } from '@angular/core';
 import { MatDialog, MatDialogRef } from '@angular/material/dialog';
-import { Subscription, timer, interval } from 'rxjs';
+import { Subscription, timer, interval, Subject } from 'rxjs';
 import { ToastrService } from 'ngx-toastr';
 import { RouterOutlet } from '@angular/router';
 
@@ -38,6 +38,8 @@
   animateChild,
   state
 } from '@angular/animations';
+import { skip } from 'rxjs/operators';
+import { ProgressBarService } from '../../core/services/progress-bar.service';
 
 @Component({
   selector: 'dlab-navbar',
@@ -50,7 +52,7 @@
       ], { optional: true }),
       group([
         query(':leave', [
-          animate('.3s ease-in-out',
+          animate('0s',
             style({
               opacity: 0,
             })
@@ -73,7 +75,6 @@
   encapsulation: ViewEncapsulation.None
 })
 export class NavbarComponent implements OnInit, OnDestroy {
-  readonly PROVIDER = DICTIONARY.cloud_provider;
 
   private readonly CHECK_ACTIVE_SCHEDULE_TIMEOUT: number = 55000;
   private readonly CHECK_ACTIVE_SCHEDULE_PERIOD: number = 15;
@@ -83,9 +84,10 @@
   isLoggedIn: boolean = false;
   metadata: any;
   isExpanded: boolean = true;
-
+  public showProgressBar: boolean = false;
   healthStatus: GeneralEnvironmentStatus;
   subscriptions: Subscription = new Subscription();
+  showProgressBarSubscr = new Subscription();
 
   constructor(
     public toastr: ToastrService,
@@ -94,20 +96,22 @@
     private healthStatusService: HealthStatusService,
     private schedulerService: SchedulerService,
     private storage: StorageService,
-    private dialog: MatDialog
+    private dialog: MatDialog,
+    private progressBarService: ProgressBarService,
   ) { }
 
   ngOnInit() {
+    this.showProgressBarSubscr = this.progressBarService.showProgressBar.subscribe(isProgressBarVisible => this.showProgressBar = isProgressBarVisible);
     this.applicationSecurityService.loggedInStatus.subscribe(response => {
       this.subscriptions.unsubscribe();
       this.subscriptions.closed = false;
 
       this.isLoggedIn = response;
       if (this.isLoggedIn) {
-        this.subscriptions.add(this.healthStatusService.statusData.subscribe(result => {
+        this.subscriptions.add(this.healthStatusService.statusData.pipe(skip(1)).subscribe(result => {
           this.healthStatus = result;
           result.status && this.checkQuoteUsed(this.healthStatus);
-          result.status && !result.projectAssigned && this.checkAssignment(this.healthStatus);
+          result.status && !result.projectAssigned && !result.admin && this.checkAssignment(this.healthStatus);
         }));
         this.subscriptions.add(timer(0, this.CHECK_ACTIVE_SCHEDULE_TIMEOUT).subscribe(() => this.refreshSchedulerData()));
         this.currentUserName = this.getUserName();
@@ -118,6 +122,7 @@
 
   ngOnDestroy(): void {
     this.subscriptions.unsubscribe();
+    this.showProgressBarSubscr.unsubscribe();
   }
 
   public getRouterOutletState(routerOutlet: RouterOutlet) {
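Note on the navbar hunk above: the component now injects a ProgressBarService, subscribes to it in ngOnInit(), and tears the subscription down in ngOnDestroy(); the pipe(skip(1)) on healthStatusService.statusData drops the first (presumably replayed) status value so the quota/assignment checks are not retriggered on login. The service itself is not part of this patch; a minimal sketch of what it plausibly looks like, assuming an RxJS BehaviorSubject and hypothetical start/stop method names:

    // Hypothetical sketch only -- the real progress-bar.service.ts is not shown
    // in this diff. A BehaviorSubject replays the current state to late
    // subscribers such as the navbar.
    import { Injectable } from '@angular/core';
    import { BehaviorSubject, Observable } from 'rxjs';

    @Injectable({ providedIn: 'root' })
    export class ProgressBarService {
      private readonly state = new BehaviorSubject<boolean>(false);

      // Stream consumed by NavbarComponent.ngOnInit() above.
      get showProgressBar(): Observable<boolean> {
        return this.state.asObservable();
      }

      startProgressBar(): void { this.state.next(true); }   // assumed name
      stopProgressBar(): void { this.state.next(false); }   // assumed name
    }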
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java b/services/self-service/src/main/resources/webapp/src/app/swagger/index.ts
similarity index 70%
rename from integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java
rename to services/self-service/src/main/resources/webapp/src/app/swagger/index.ts
index 4b70836..61da422 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/exceptions/JenkinsException.java
+++ b/services/self-service/src/main/resources/webapp/src/app/swagger/index.ts
@@ -17,13 +17,16 @@
  * under the License.
  */
 
-package com.epam.dlab.automation.exceptions;
+import { NgModule } from '@angular/core';
+import { CommonModule } from '@angular/common';
 
-public class JenkinsException extends RuntimeException {
+import { SwaggerComponent } from './swagger.component';
 
-	private static final long serialVersionUID = 1L;
+export * from './swagger.component';
 
-	public JenkinsException(String message) {
-		super(message);
-	}
-}
+@NgModule({
+  imports: [CommonModule],
+  declarations: [SwaggerComponent],
+  exports: [SwaggerComponent]
+})
+export class SwaggerAPIModule { }
diff --git a/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.html b/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.html
new file mode 100644
index 0000000..467bfb5
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.html
@@ -0,0 +1,20 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing,
+  ~ software distributed under the License is distributed on an
+  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  ~ KIND, either express or implied.  See the License for the
+  ~ specific language governing permissions and limitations
+  ~ under the License.
+  -->
+
+<div id="swagger-ui"></div>
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java b/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.scss
similarity index 91%
rename from integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
rename to services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.scss
index 1e49a60..3d56d22 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/docker/Labels.java
+++ b/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.scss
@@ -1,4 +1,4 @@
-/*
+/*!
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,8 +16,3 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-package com.epam.dlab.automation.docker;
-
-class Labels {
-}
diff --git a/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.ts b/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.ts
new file mode 100644
index 0000000..5c3c612
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/swagger/swagger.component.ts
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { Component, OnInit } from '@angular/core';
+import { HealthStatusService } from '../core/services';
+import { ToastrService } from 'ngx-toastr';
+
+declare const SwaggerUIBundle: any;
+
+@Component({
+  selector: 'dlab-swagger',
+  templateUrl: './swagger.component.html',
+  styleUrls: ['./swagger.component.scss']
+})
+export class SwaggerComponent implements OnInit {
+  private healthStatus: any;
+
+  constructor(
+    private healthStatusService: HealthStatusService,
+    public toastr: ToastrService,
+    ) {
+  }
+
+  ngOnInit(): void {
+    this.getEnvironmentHealthStatus();
+    const ui = SwaggerUIBundle({
+      dom_id: '#swagger-ui',
+      layout: 'BaseLayout',
+      presets: [
+        SwaggerUIBundle.presets.apis,
+        SwaggerUIBundle.SwaggerUIStandalonePreset
+      ],
+      url: '../assets/endpoint-api.json',
+      docExpansion: 'none',
+      operationsSorter: 'alpha'
+    });
+  }
+
+  private getEnvironmentHealthStatus() {
+    this.healthStatusService.getEnvironmentHealthStatus().subscribe(
+      (result: any) => {
+        this.healthStatus = result;
+      },
+      error => this.toastr.error(error.message, 'Oops!'));
+  }
+
+}
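A note on the SwaggerUIBundle call above: the component relies on the swagger-ui dist bundle being loaded as a global script (hence the declare const). In swagger-ui-dist the standalone preset is exposed as its own global, SwaggerUIStandalonePreset, rather than as a property of SwaggerUIBundle, so SwaggerUIBundle.SwaggerUIStandalonePreset likely evaluates to undefined; with layout: 'BaseLayout' the standalone preset is not needed in any case. A hedged sketch of the conventional form against those globals:

    // Sketch of the same initialization using the swagger-ui-dist globals.
    declare const SwaggerUIBundle: any;
    declare const SwaggerUIStandalonePreset: any; // separate global in swagger-ui-dist

    const ui = SwaggerUIBundle({
      dom_id: '#swagger-ui',                // matches the div in swagger.component.html
      url: '../assets/endpoint-api.json',   // spec shipped with the webapp
      layout: 'BaseLayout',
      presets: [SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset],
      docExpansion: 'none',                 // collapse all operations by default
      operationsSorter: 'alpha'
    });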
diff --git a/services/self-service/src/main/resources/webapp/src/app/webterminal/webterminal.component.html b/services/self-service/src/main/resources/webapp/src/app/webterminal/webterminal.component.html
index 4aedd1c..6fe2d98 100644
--- a/services/self-service/src/main/resources/webapp/src/app/webterminal/webterminal.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/webterminal/webterminal.component.html
@@ -15,6 +15,6 @@
   ~ KIND, either express or implied.  See the License for the
   ~ specific language governing permissions and limitations
   ~ under the License.
--->
+  -->
 
 <div #terminal id="display" class="guac-display guac-loading"></div>
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
index 6943d3d..c6f8fe8 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
@@ -49,7 +49,7 @@
 
 .modal-fullscreen {
   width: 100vw;
-  height: 100vh;
+  min-height: 80vh;
 }
 
 mat-dialog-container {
@@ -62,6 +62,7 @@
 
     #dialog-box {
       color: $modal-text-color;
+      min-height: 150px;
 
       .dialog-header {
         padding-left: 30px;
@@ -128,7 +129,7 @@
         margin-right: 2px;
       }
 
-      .btn {
+      .btn:not(.mat-icon-button) {
         padding: 6px 15px;
         min-width: 140px;
         font-weight: 600;
@@ -138,6 +139,7 @@
         color: #35afd5;
         text-decoration: none;
         transition: all .45s ease-in-out;
+        line-height: 19px;
 
         &:hover {
           color: #5faec7;
@@ -267,7 +269,7 @@
     }
   }
 
-  span {
+  span:not(.description) {
     font-size: 14px;
     overflow: hidden;
     text-overflow: ellipsis;
@@ -288,3 +290,60 @@
     padding-left: 45px !important;
   }
 }
+
+.cdk-overlay-container .cdk-overlay-pane {
+  .mat-select-panel {
+    &.ng-animating {
+      visibility: hidden;
+    }
+  }
+
+  .create-resources-dialog {
+    margin-left: 6px;
+    min-width: calc(100% + 10px) !important;
+  }
+
+  .create-resources-shapes {
+    margin-left: 22px;
+    min-width: calc(100% + 10px) !important;
+  }
+
+  .crete-project-dialog {
+    margin-left: 30px;
+    min-width: calc(100% + 10px) !important;
+  }
+
+  .select-role {
+    margin-left: 30px;
+    min-width: calc(100% + 10px) !important;
+  }
+}
+
+.strong {
+  font-weight: 600;
+  color: $blue-grey-color;
+}
+
+.flex {
+  display: flex;
+}
+
+.endpoints-table{
+  border-collapse: separate;
+}
+
+.mat-step-icon-state-number .mat-step-icon-content{
+  top: 49%;
+}
+
+.confirmation-dialog p.delete-user{
+  font-weight: 500;
+  max-height: 200px;
+  overflow: auto;
+}
+
+@media screen and (max-width: 1280px) {
+  .modal-fullscreen {
+    max-width: 100vw !important;
+  }
+}
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_general.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_general.scss
index 8a3250e..769b5ef 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_general.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_general.scss
@@ -46,7 +46,6 @@
   width: 100%;
   height: 36px;
   padding: 0 10px;
-  color: #455c74;
   color: #6e7ca0;
   border: 1px solid transparent;
   background: rgba(247, 247, 247, 0.87);
@@ -86,6 +85,7 @@
   padding-bottom: 20px;
   position: relative;
 }
+
 .row-wrap {
   padding-bottom: 0;
 }
@@ -103,6 +103,7 @@
   text-align: left;
   font-family: 'Open Sans', sans-serif;
 }
+
 .control-group .control {
   width: 65%;
 }
@@ -165,4 +166,9 @@
   vertical-align: middle;
   color: #35afd5;
   line-height: 26px;
-}
\ No newline at end of file
+}
+
+input[type=file],
+input[type=file]::-webkit-file-upload-button {
+    cursor: pointer;
+}
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_reset.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_reset.scss
index 8875e14..6e90bae 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_reset.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_reset.scss
@@ -52,7 +52,7 @@
 }
 
 table {
-  border-collapse: collapse;
+  //border-collapse: collapse;
   border-spacing: 0;
 }
 
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
index c56d090..43e9c50 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
@@ -122,6 +122,9 @@
 
       &.not-allowed {
         background-color: #dcdcdc;
+        .mat-select-placeholder {
+         cursor: not-allowed;
+        }
       }
     }
   }
@@ -211,6 +214,17 @@
   font-size: 14px;
 }
 
+.mat-select-disabled{
+  .mat-select-placeholder {
+    cursor: not-allowed;
+  }
+
+}
+
+.mat-select-disabled + .caret {
+  cursor: not-allowed !important;
+}
+
 .mat-input-placeholder {
   font-weight: 400;
 }
@@ -285,6 +299,24 @@
   word-break: break-all;
 }
 
+.create-cluster,
+.project-form,
+.create-environment,
+.selection,
+.manage-roles{
+    .mat-select-value{
+      overflow: visible;
+    }
+    .mat-form-field-label {
+      line-height: 21px !important;
+      font-size: 15px !important;
+
+      &:not(.mat-form-field-empty) {
+        color: transparent !important;
+      }
+    }
+}
+
 .manage-roles,
 .project-form,
 .selection {
@@ -395,6 +427,7 @@
       .mat-select-trigger {
         width: 100%;
         display: flex;
+        max-width: 500px;
       }
     }
   }
@@ -582,10 +615,8 @@
 .mat-table {
   .header-row {
     th.mat-header-cell {
-      font-size: 15px;
       font-family: 'Open Sans', sans-serif;
       font-weight: 600;
-      color: #607D8B;
     }
 
     .mat-cell {
@@ -625,3 +656,14 @@
     background-color: #baf0f7;
   }
 }
+.manage-roles{
+  .mat-horizontal-content-container{
+    overflow: visible !important;
+  }
+}
+
+.filter-row-item, .label-header{
+  box-shadow: inset 0 -1px 0 lightgrey;
+  border-bottom: none !important;
+}
+
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_variables.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_variables.scss
index 538128b..63d8202 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_variables.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_variables.scss
@@ -20,4 +20,8 @@
 $modal-text-color: #718aa5;
 $modal-header-color: #f6fafe;
 
-$brand-color: #4ab8dc;
\ No newline at end of file
+$brand-color: #4ab8dc;
+$blue-grey-color: #718ba6;
+$dark-grey-color: #455c74;
+
+$default-font-size: 14px;
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/app-loading.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/app-loading.scss
index dfaccb2..6fb034b 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/app-loading.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/app-loading.scss
@@ -37,11 +37,9 @@
   animation: rotate 2s linear infinite;
   transform-origin: center center;
   position: absolute;
-  top: 0;
-  bottom: 0;
-  left: 0;
-  right: 0;
-  margin: auto;
+  top: calc(50% - 100px);
+  left: calc(50% - 100px);
+  margin: 0;
 }
 
 .app-loading .spinner .path {
@@ -84,3 +82,8 @@
     background-size: cover;
   }
 }
+
+.nav-bar .mat-progress-bar {
+  position: absolute;
+  height: 2px;
+}
diff --git a/services/self-service/src/main/resources/webapp/src/assets/svg/swagger-logo.svg b/services/self-service/src/main/resources/webapp/src/assets/svg/swagger-logo.svg
new file mode 100644
index 0000000..cb85281
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/assets/svg/swagger-logo.svg
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg width="256px" height="256px" viewBox="0 0 256 256" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="xMidYMid">
+		<g>
+				<path d="M127.059657,255.996921 C58.8506544,255.526472 -0.457073619,198.918442 0.00265506057,126.998303 C0.444649399,57.7958628 57.9516598,-0.468967577 129.11002,0.00284555012 C198.267128,0.462386081 256.613109,57.8667711 255.995136,128.194199 C256.568091,197.883453 197.934268,256.489189 127.059657,255.996921 Z M127.059657,255.996921 C58.8506544,255.526472 -0.457073619,198.918442 0.00265506057,126.998303 C0.444649399,57.7958628 57.9516598,-0.468967577 129.11002,0.00284555012 C198.267128,0.462386081 256.613109,57.8667711 255.995136,128.194199 C256.568091,197.883453 197.934268,256.489189 127.059657,255.996921 Z" fill="#FFFFFF"></path>
+				<path d="M127.184644,238.997327 C68.0323765,238.589271 16.6036091,189.498744 17.0023028,127.131428 C17.3860285,67.1185953 67.2554,16.5917106 128.963117,17.0024872 C188.934544,17.4010221 239.531905,67.1825241 238.995778,128.169251 C239.492444,188.602381 188.64743,239.424426 127.184644,238.997327 Z M127.184644,238.997327 C68.0323765,238.589271 16.6036091,189.498744 17.0023028,127.131428 C17.3860285,67.1185953 67.2554,16.5917106 128.963117,17.0024872 C188.934544,17.4010221 239.531905,67.1825241 238.995778,128.169251 C239.492444,188.602381 188.64743,239.424426 127.184644,238.997327 Z" fill="#ececec"></path>
+				<path d="M169.327319,127.956161 C169.042723,133.246373 164.421106,137.639224 159.866213,136.872586 C159.844426,136.872586 159.821277,136.872586 159.798128,136.872586 C154.753021,136.879395 150.658383,132.794288 150.652936,127.749182 C150.824511,122.690458 155.019915,118.703395 160.08,118.789182 C165.125106,118.813692 169.59966,123.077182 169.327319,127.956161 Z M88.2011915,179.220161 C90.1034894,179.27599 92.0071489,179.235139 94.2008511,179.235139 L94.2008511,193.021012 C80.5661277,195.326373 69.3348085,191.455054 66.5787234,179.929607 C65.6350638,175.69199 65.0549787,171.380841 64.8425532,167.04382 C64.5497872,162.452161 65.0563404,157.808756 64.706383,153.225267 C63.7368511,140.613182 62.1028085,136.30748 50,135.711054 L50,120.014714 C50.8674043,119.81182 51.7470638,119.662033 52.6321702,119.562629 C59.2677447,119.23582 62.0646809,117.201437 63.5489362,110.665267 C64.2243404,106.992756 64.6246809,103.275309 64.7431489,99.5428839 C65.268766,92.3258627 65.0822128,84.991735 66.2845957,77.8918201 C68.0221277,67.6245861 74.3962553,62.6366712 84.9249362,62.0783733 C87.9206809,61.9176925 90.9259574,62.0538627 94.3206809,62.0538627 L94.3206809,76.1447563 C92.9235745,76.2441605 91.6435745,76.4470542 90.3717447,76.4089265 C81.7916596,76.146118 81.3477447,79.0683308 80.7213617,86.1709691 C80.3305532,90.6250967 80.8697872,95.1554797 80.5661277,99.6245861 C80.2488511,104.071905 79.6537872,108.496075 78.7850213,112.869863 C77.547234,119.208586 73.6500426,123.922799 68.2495319,127.92348 C78.7332766,134.745607 79.9261277,145.346458 80.6069787,156.110714 C80.9732766,161.895224 80.8057872,167.720586 81.3926809,173.476501 C81.8502128,177.944246 83.5877447,179.08399 88.2011915,179.220161 Z M97.0372766,118.789182 C97.0917447,118.789182 97.1448511,118.789182 97.1993191,118.789182 C102.211745,118.872246 106.209702,123.002288 106.126638,128.016075 C106.126638,128.180841 106.121191,128.344246 106.11166,128.50765 C105.829787,133.407054 101.630298,137.149012 96.7308936,136.867139 C96.5334468,136.871224 96.3373617,136.867139 96.1399149,136.857607 C91.1506383,136.609778 87.3065532,132.36399 87.554383,127.374714 C87.8022128,122.385437 92.048,118.541352 97.0372766,118.789182 Z M128.273362,118.789182 C133.755574,118.746969 137.396766,122.29965 137.425362,127.719224 C137.455319,133.284501 134.003404,136.845352 128.556596,136.868501 C123.017191,136.893012 119.370553,133.389352 119.340596,128.002458 C119.324255,127.727395 119.32017,127.452331 119.32834,127.177267 C119.482213,122.390884 123.486979,118.635309 128.273362,118.789182 Z M193.673191,111.92348 C195.131574,117.370288 197.970723,119.284841 203.704851,119.546288 C204.644426,119.589863 205.579915,119.749182 206.868085,119.892161 L206.868085,135.584416 C206.170894,135.813182 205.456,135.984756 204.730213,136.096416 C197.046128,136.574373 193.54383,139.726714 192.76766,147.431224 C192.272,152.349692 192.312851,157.322629 191.972426,162.258799 C191.829447,167.678373 191.336511,173.082969 190.49634,178.438544 C188.535489,188.142033 182.477277,192.982884 172.467404,193.573863 C169.245617,193.764501 166.000681,193.60382 162.526979,193.60382 L162.526979,179.578288 C164.396596,179.462544 166.046979,179.303224 167.701447,179.263735 C173.682043,179.120756 175.796766,177.192586 176.089532,171.252841 C176.413617,164.727565 176.555234,158.194118 176.846638,151.66748 C177.270128,142.233607 179.853277,133.806033 188.641702,127.922118 C183.612936,124.336756 179.575489,119.994288 178.529702,114.138969 C177.264681,107.041778 176.85617,99.7879903 176.175319,92.5913946 C175.838979,88.9937776 175.855319,85.3648414 175.504,81.7699478 C175.125447,77.8890967 172.459234,76.5464584 168.926979,76.4593095 C166.903489,76.4102882 164.87183,76.4497776 162.284596,76.4497776 L162.284596,62.7537776 C178.793872,60.0126712 190.198128,65.5057776 191.257532,81.3015222 C191.701447,87.9343733 191.636085,94.5985435 192.060936,101.231395 C192.247489,104.839905 192.786723,108.421182 193.673191,111.92348 Z" fill="#FFFFFF"></path>
+		</g>
+</svg>
diff --git a/services/self-service/src/main/resources/webapp/src/dictionary/aws.dictionary.ts b/services/self-service/src/main/resources/webapp/src/dictionary/aws.dictionary.ts
index fd767e7..f989415 100644
--- a/services/self-service/src/main/resources/webapp/src/dictionary/aws.dictionary.ts
+++ b/services/self-service/src/main/resources/webapp/src/dictionary/aws.dictionary.ts
@@ -17,15 +17,11 @@
  * under the License.
  */
 
-export const NAMING_CONVENTION = {
+export const NAMING_CONVENTION_AWS = {
     'cloud_provider': 'aws',
-    'use_ldap': true,
-    'notebook_instance_size': 'Instance shape',
-    'personal_storage': 'Shared bucket',
-    'collaboration_storage': 'Shared project bucket',
+    'personal_storage': 'Project bucket',
     'account': '',
     'container': '',
-    'data_engine': 'Deploy Spark Server / Deploy EMR',
     'image': 'AMI',
     'data_engine_master_instance_size': 'Master instance shape',
     'data_engine_slave_instance_size': 'Slave instance shape',
@@ -34,7 +30,7 @@
     'total_instance_number': 'total_instance_number',
     'spot_instance': 'Spot instance',
     'cluster_version': 'emr_version',
-    'max_cluster_name_length': 64,
+    'max_cluster_name_length': 10,
     'billing': {
         'resourceName': 'resource_name',
         'cost': 'cost',
@@ -52,7 +48,6 @@
     'service': 'Service',
     'type': 'Type',
     'instance_size': 'Shape',
-    'computational_resource': 'Computational resources',
     'user_storage_account_name': '',
     'shared_storage_account_name': '',
     'bucket_name': 'user_own_bicket_name',
@@ -65,9 +60,6 @@
         'total_instance_number_max': 'max_emr_instance_count',
         'min_emr_spot_instance_bid_pct': 'min_emr_spot_instance_bid_pct',
         'max_emr_spot_instance_bid_pct': 'max_emr_spot_instance_bid_pct',
-        'data_engine_master_instance_size': 'Master instance shape',
-        'data_engine_slave_instance_size': 'Slave instance shape',
-        'instance_number': 'Total instance number',
         'master_node_shape': 'master_node_shape',
         'slave_node_shape': 'slave_node_shape',
         'total_instance_number': 'total_instance_number',
@@ -76,40 +68,9 @@
         'total_instance_number_min': 'min_spark_instance_count',
         'total_instance_number_max': 'max_spark_instance_count',
         'data_engine_master_instance_size': 'Node shape',
-        'instance_number': 'Total node number',
         'master_node_shape': 'dataengine_instance_shape',
         'total_instance_number': 'dataengine_instance_count',
     },
-    'max_project_name_length': 40
 };
 
-export class ReportingConfigModel {
 
-    static getDefault(): ReportingConfigModel {
-        return new ReportingConfigModel([], [], [], [], [], '', '', '', []);
-    }
-
-    constructor(
-        public user: Array<string>,
-        public product: Array<string>,
-        public resource_type: Array<string>,
-        public status: Array<string>,
-        public shape: Array<string>,
-        public date_start: string,
-        public date_end: string,
-        public dlab_id: string,
-        public project?: Array<string>
-    ) { }
-
-    defaultConfigurations(): void {
-        this.user = [];
-        this.product = [];
-        this.resource_type = [];
-        this.status = [];
-        this.shape = [];
-        this.date_start = '';
-        this.date_end = '';
-        this.dlab_id = '';
-        this.project = [];
-    }
-}
diff --git a/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts b/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
index fd178c5..b7d9abc 100644
--- a/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
+++ b/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
@@ -17,34 +17,30 @@
  * under the License.
  */
 
-export const NAMING_CONVENTION = {
+export const NAMING_CONVENTION_AZURE = {
     'cloud_provider': 'azure',
-    'use_ldap': true,
-    'notebook_instance_size': 'Virtual machine size',
-    'personal_storage': 'Shared bucket',
-    'collaboration_storage': 'Shared project bucket',
+    'personal_storage': 'Project bucket',
+    'collaboration_storage': 'Shared endpoint bucket',
     'account': 'Account:',
     'container': 'Container:',
-    'data_engine': 'Deploy Spark Server / Deploy HDInsight',
     'image': 'image',
     'data_engine_master_instance_size': 'Head node size',
     'data_engine_slave_instance_size': 'Worker node size',
     'master_node_shape': 'azure_dataengine_master_size',
     'slave_node_shape': 'azure_dataengine_slave_size',
     'total_instance_number': 'dataengine_instance_count',
-
     'spot_instance': 'Low-priority virtual machines',
     'cluster_version': '',
     'max_cluster_name_length': 10,
     'billing': {
         'resourceName': 'resourceName',
-        'cost': 'costString',
+        'cost': 'cost',
         'costTotal': 'cost_total',
         'currencyCode': 'currencyCode',
         'dateFrom': 'from',
         'dateTo': 'to',
         'service': 'meterCategory',
-        'service_filter_key': 'category',
+        'service_filter_key': 'meterCategory',
         'type': '',
         'resourceType': 'resource_type',
         'instance_size': 'size',
@@ -53,7 +49,6 @@
     'service': 'Category',
     'type': '',
     'instance_size': 'Size',
-    'computational_resource': 'Computational resources',
     'user_storage_account_name': 'user_storage_account_name',
     'shared_storage_account_name': 'shared_storage_account_name',
     'bucket_name': 'user_container_name',
@@ -66,9 +61,6 @@
         'total_instance_number_max': 'max_emr_instance_count',
         'min_emr_spot_instance_bid_pct': 'min_emr_spot_instance_bid_pct',
         'max_emr_spot_instance_bid_pct': 'max_emr_spot_instance_bid_pct',
-        'data_engine_master_instance_size': 'Master instance shape',
-        'data_engine_slave_instance_size': 'Slave instance shape',
-        'instance_number': 'Total instance number',
         'master_node_shape': 'master_node_shape',
         'slave_node_shape': 'slave_node_shape',
         'total_instance_number': 'total_instance_number',
@@ -77,40 +69,8 @@
         'total_instance_number_min': 'min_spark_instance_count',
         'total_instance_number_max': 'max_spark_instance_count',
         'data_engine_master_instance_size': 'Node size',
-        'instance_number': 'Total node number',
         'master_node_shape': 'dataengine_instance_shape',
         'total_instance_number': 'dataengine_instance_count'
     },
-    'max_project_name_length': 30
 };
 
-export class ReportingConfigModel {
-
-    static getDefault(): ReportingConfigModel {
-        return new ReportingConfigModel([], [], [], [], [], '', '', '', []);
-    }
-
-    constructor(
-        public user: Array<string>,
-        public category: Array<string>,
-        public resource_type: Array<string>,
-        public status: Array<string>,
-        public size: Array<string>,
-        public date_start: string,
-        public date_end: string,
-        public dlab_id: string,
-        public project?: Array<string>
-    ) { }
-
-    defaultConfigurations(): void {
-        this.user = [];
-        this.category = [];
-        this.resource_type = [];
-        this.status = [];
-        this.size = [];
-        this.date_start = '';
-        this.date_end = '';
-        this.dlab_id = '';
-        this.project = [];
-    }
-}
diff --git a/services/self-service/src/main/resources/webapp/src/dictionary/gcp.dictionary.ts b/services/self-service/src/main/resources/webapp/src/dictionary/gcp.dictionary.ts
index 3d6f26e..d92b7ff 100644
--- a/services/self-service/src/main/resources/webapp/src/dictionary/gcp.dictionary.ts
+++ b/services/self-service/src/main/resources/webapp/src/dictionary/gcp.dictionary.ts
@@ -17,26 +17,20 @@
  * under the License.
  */
 
-export const NAMING_CONVENTION = {
+export const NAMING_CONVENTION_GCP = {
     'cloud_provider': 'gcp',
-    'use_ldap': true,
-    'notebook_instance_size': 'Instance type',
-    'personal_storage': 'Shared bucket',
-    'collaboration_storage': 'Shared project bucket',
+    'personal_storage': 'Project bucket',
     'account': '',
     'container': '',
-    'data_engine': 'Deploy Spark Server / Deploy Dataproc',
     'image': 'Not available',
     'data_engine_master_instance_size': 'Master machine type',
     'data_engine_slave_instance_size': 'Slave machine type',
-
     'master_node_shape': 'master_node_shape',
     'slave_node_shape': 'slave_node_shape',
     'total_instance_number': 'total_instance_number',
-
     'spot_instance': 'Preemptible worker nodes',
     'cluster_version': 'dataproc_version',
-    'max_cluster_name_length': 9,
+    'max_cluster_name_length': 7,
     'billing': {
         'resourceName': 'resource_name',
         'cost': 'cost',
@@ -48,13 +42,12 @@
         'service_filter_key': 'product',
         'type': 'dlab_resource_type',
         'resourceType': 'dlab_resource_type',
-        'instance_size': 'shape',
+        'instance_size': 'shapes',
         'dlabId': 'dlab_id'
     },
     'service': 'Product',
     'type': 'Resource',
     'instance_size': 'Type',
-    'computational_resource': 'Computational resources',
     'user_storage_account_name': '',
     'shared_storage_account_name': '',
     'bucket_name': 'user_own_bucket_name',
@@ -67,9 +60,6 @@
         'total_instance_number_max': 'max_instance_count',
         'min_emr_spot_instance_bid_pct': 'min_emr_spot_instance_bid_pct',
         'max_emr_spot_instance_bid_pct': 'max_emr_spot_instance_bid_pct',
-        'data_engine_master_instance_size': 'Master machine type',
-        'data_engine_slave_instance_size': 'Slave machine type',
-        'instance_number': 'Total machine count',
         'master_instance_number': 'Master node count',
         'slave_instance_number': 'Worker node count',
         'master_node_shape': 'master_node_shape',
@@ -81,41 +71,10 @@
         'total_instance_number_min': 'min_spark_instance_count',
         'total_instance_number_max': 'max_spark_instance_count',
         'data_engine_master_instance_size': 'Machine type',
-        'instance_number': 'Total machine number',
         'master_instance_number': 'Master machine number',
         'master_node_shape': 'dataengine_instance_shape',
         'total_instance_number': 'dataengine_instance_count',
     },
-    'max_project_name_length': 10
 };
 
-export class ReportingConfigModel {
 
-    static getDefault(): ReportingConfigModel {
-        return new ReportingConfigModel([], [], [], [], [], '', '', '', []);
-    }
-
-    constructor(
-        public user: Array<string>,
-        public product: Array<string>,
-        public resource_type: Array<string>,
-        public status: Array<string>,
-        public shape: Array<string>,
-        public date_start: string,
-        public date_end: string,
-        public dlab_id: string,
-        public project?: Array<string>
-    ) { }
-
-    defaultConfigurations(): void {
-        this.user = [];
-        this.product = [];
-        this.resource_type = [];
-        this.status = [];
-        this.shape = [];
-        this.date_start = '';
-        this.date_end = '';
-        this.dlab_id = '';
-        this.project = [];
-    }
-}
diff --git a/services/self-service/src/main/resources/webapp/src/dictionary/global.dictionary.ts b/services/self-service/src/main/resources/webapp/src/dictionary/global.dictionary.ts
index 6d0d8ea..26fe456 100644
--- a/services/self-service/src/main/resources/webapp/src/dictionary/global.dictionary.ts
+++ b/services/self-service/src/main/resources/webapp/src/dictionary/global.dictionary.ts
@@ -19,8 +19,45 @@
 
 // from './{{ aws | gcp | azure }}.dictionary';
 
-import { NAMING_CONVENTION } from './CLOUD_PROVIDER.dictionary';
+import { NAMING_CONVENTION_AWS } from './aws.dictionary';
+import { NAMING_CONVENTION_GCP } from './gcp.dictionary';
+import { NAMING_CONVENTION_AZURE } from './azure.dictionary';
 
-export * from './CLOUD_PROVIDER.dictionary';
+export const DICTIONARY = Object.freeze({
+  aws: NAMING_CONVENTION_AWS,
+  gcp: NAMING_CONVENTION_GCP,
+  azure: NAMING_CONVENTION_AZURE
+});
 
-export const DICTIONARY = Object.freeze(NAMING_CONVENTION);
+export class ReportingConfigModel {
+
+  static getDefault(): ReportingConfigModel {
+    return new ReportingConfigModel([], [], [], [], [], '', '', '', []);
+  }
+
+  constructor(
+    public users: Array<string>,
+    public products: Array<string>,
+    public resource_type: Array<string>,
+    public statuses: Array<string>,
+    public shapes: Array<string>,
+    public date_start: string,
+    public date_end: string,
+    public dlab_id: string,
+    public projects: Array<string>
+  ) { }
+
+  defaultConfigurations(): void {
+    this.users = [];
+    this.products = [];
+    this.resource_type = [];
+    this.statuses = [];
+    this.shapes = [];
+    this.date_start = '';
+    this.date_end = '';
+    this.dlab_id = '';
+    this.projects = [];
+  }
+}
+
+
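With this refactor the per-cloud dictionaries are no longer substituted at build time (the old CLOUD_PROVIDER.dictionary import); callers index the frozen DICTIONARY map by provider key at runtime, and the previously triplicated ReportingConfigModel now lives here once. A small usage sketch against the values introduced in this patch:

    // Runtime lookup instead of a build-time substituted module.
    import { DICTIONARY, ReportingConfigModel } from './global.dictionary';

    const convention = DICTIONARY['gcp'];
    console.log(convention.personal_storage);        // 'Project bucket'
    console.log(convention.max_cluster_name_length); // 7 for GCP after this change

    const report = ReportingConfigModel.getDefault(); // empty filter set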
diff --git a/services/self-service/src/main/resources/webapp/src/styles.scss b/services/self-service/src/main/resources/webapp/src/styles.scss
index c5462d2..e1bbe94 100644
--- a/services/self-service/src/main/resources/webapp/src/styles.scss
+++ b/services/self-service/src/main/resources/webapp/src/styles.scss
@@ -27,6 +27,10 @@
 @import '_theme.scss';
 @import '_dialogs.scss';
 
+.sans {
+  font-family: 'Open Sans', sans-serif !important;
+}
+
 .mat-tab-label {
   font-family: 'Open Sans', sans-serif !important;
   color: #455c74;
@@ -100,6 +104,7 @@
 
 .status {
   text-transform: capitalize;
+  text-align: left;
 }
 
 .running,
@@ -310,6 +315,10 @@
   margin-bottom: 10px;
 }
 
+.m-bott-30 {
+  margin-bottom: 30px;
+}
+
 .m-top-10p {
   margin-top: 10%;
 }
@@ -458,3 +467,10 @@
     }
   }
 }
+
+.dialog-max-width {
+  margin: 0 auto;
+  max-width: 350px;
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
diff --git a/services/self-service/src/main/resources/webapp/src/tsconfig.app.json b/services/self-service/src/main/resources/webapp/src/tsconfig.app.json
index 116e716..be171af 100644
--- a/services/self-service/src/main/resources/webapp/src/tsconfig.app.json
+++ b/services/self-service/src/main/resources/webapp/src/tsconfig.app.json
@@ -12,7 +12,15 @@
     ],
     "outDir": "../out-tsc/app",
     "baseUrl": "",
-    "types": []
+    "types": [],
+    "paths": {
+      "moment": [
+        "../node_modules/moment/min/moment.min.js"
+      ],
+      "moment-timezone": [
+        "../node_modules/moment-timezone/builds/moment-timezone-with-data-2012-2022.min.js"
+      ]
+    }
   },
   "typeRoots": [
     "node_modules/@types"
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EdgeResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EdgeResourceTest.java
deleted file mode 100644
index 075502e..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EdgeResourceTest.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.service.EdgeService;
-import com.epam.dlab.exceptions.DlabException;
-import io.dropwizard.auth.AuthenticationException;
-import io.dropwizard.testing.junit.ResourceTestRule;
-import org.apache.http.HttpStatus;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
-
-public class EdgeResourceTest extends TestBase {
-
-	private EdgeService edgeService = mock(EdgeService.class);
-
-	@Rule
-	public final ResourceTestRule resources = getResourceTestRuleInstance(new EdgeResource(edgeService));
-
-	@Before
-	public void setup() throws AuthenticationException {
-		authSetup();
-	}
-
-	@Test
-	public void start() {
-		when(edgeService.start(any(UserInfo.class))).thenReturn("someUuid");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure/edge/start")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getUserInfo()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals("someUuid", response.readEntity(String.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(edgeService).start(getUserInfo());
-		verifyNoMoreInteractions(edgeService);
-	}
-
-	@Test
-	public void startWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(edgeService.start(any(UserInfo.class))).thenReturn("someUuid");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure/edge/start")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getUserInfo()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals("someUuid", response.readEntity(String.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(edgeService).start(getUserInfo());
-		verifyNoMoreInteractions(edgeService);
-	}
-
-	@Test
-	public void startWithException() {
-		when(edgeService.start(any(UserInfo.class))).thenThrow(new DlabException("Could not start edge node"));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure/edge/start")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getUserInfo()));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		String expectedJson = "\"code\":500,\"message\":\"There was an error processing your request. " +
-				"It has been logged";
-		String actualJson = response.readEntity(String.class);
-		assertTrue(actualJson.contains(expectedJson));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(edgeService).start(getUserInfo());
-		verifyNoMoreInteractions(edgeService);
-	}
-
-	@Test
-	public void stop() {
-		when(edgeService.stop(any(UserInfo.class))).thenReturn("someUuid");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure/edge/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getUserInfo()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals("someUuid", response.readEntity(String.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(edgeService).stop(getUserInfo());
-		verifyNoMoreInteractions(edgeService);
-	}
-
-	@Test
-	public void stopWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(edgeService.stop(any(UserInfo.class))).thenReturn("someUuid");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure/edge/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getUserInfo()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals("someUuid", response.readEntity(String.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(edgeService).stop(getUserInfo());
-		verifyNoMoreInteractions(edgeService);
-	}
-
-	@Test
-	public void stopWithException() {
-		when(edgeService.stop(any(UserInfo.class))).thenThrow(new DlabException("Could not stop edge node"));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure/edge/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getUserInfo()));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		String expectedJson = "\"code\":500,\"message\":\"There was an error processing your request. " +
-				"It has been logged";
-		String actualJson = response.readEntity(String.class);
-		assertTrue(actualJson.contains(expectedJson));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(edgeService).stop(getUserInfo());
-		verifyNoMoreInteractions(edgeService);
-	}
-
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
index 4d62744..5ac537b 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
@@ -19,7 +19,7 @@
 
 package com.epam.dlab.backendapi.resources;
 
-import com.epam.dlab.backendapi.resources.dto.UserDTO;
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.service.EnvironmentService;
 import com.epam.dlab.exceptions.ResourceConflictException;
 import io.dropwizard.auth.AuthenticationException;
@@ -30,17 +30,23 @@
 import org.junit.Test;
 
 import javax.ws.rs.client.Entity;
-import javax.ws.rs.core.GenericType;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.Collections;
-import java.util.List;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class EnvironmentResourceTest extends TestBase {
 
@@ -55,45 +61,9 @@
 	}
 
 	@Test
-	public void getUsersWithActiveEnv() {
-		when(environmentService.getUsers()).thenReturn(Collections.singletonList(new UserDTO("activeUser",
-				null, UserDTO.Status.ACTIVE)));
-		final Response response = resources.getJerseyTest()
-				.target("/environment/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(Collections.singletonList(new UserDTO("activeUser", null, UserDTO.Status.ACTIVE)),
-				response.readEntity(new GenericType<List<UserDTO>>() {
-				}));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).getUsers();
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void getUsersWithActiveEnvWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(environmentService.getUsers()).thenReturn(Collections.singletonList(new UserDTO("activeUser",
-				null, UserDTO.Status.ACTIVE)));
-		final Response response = resources.getJerseyTest()
-				.target("/environment/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
-
-		assertEquals(HttpStatus.SC_FORBIDDEN, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(environmentService);
-	}
-
-	@Test
 	public void getAllEnv() {
-		when(environmentService.getAllEnv()).thenReturn(Collections.emptyList());
+		UserInfo userInfo = getUserInfo();
+		when(environmentService.getAllEnv(userInfo)).thenReturn(Collections.emptyList());
 		final Response response = resources.getJerseyTest()
 				.target("/environment/all")
 				.request()
@@ -103,14 +73,14 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).getAllEnv();
+		verify(environmentService).getAllEnv(eq(userInfo));
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void getAllEnvWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(environmentService.getAllEnv()).thenReturn(Collections.emptyList());
+		when(environmentService.getAllEnv(getUserInfo())).thenReturn(Collections.emptyList());
 		final Response response = resources.getJerseyTest()
 				.target("/environment/all")
 				.request()
@@ -124,157 +94,10 @@
 	}
 
 	@Test
-	public void terminateEnv() {
-		doNothing().when(environmentService).terminateEnvironment(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).terminateEnvironment(USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void terminateEnvWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(environmentService).terminateEnvironment(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_FORBIDDEN, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(environmentService);
-	}
-
-	@Test
-	public void terminateEnvWithResourceConflictException() {
-		doThrow(new ResourceConflictException("Can not terminate environment because one of the user resources is in" +
-				"status CREATING or STARTING")).when(environmentService).terminateEnvironment(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).terminateEnvironment(USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEnv() {
-		doNothing().when(environmentService).stopEnvironment(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).stopEnvironment(USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEnvWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(environmentService).stopEnvironment(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_FORBIDDEN, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEnvWithResourceConflictException() {
-		doThrow(new ResourceConflictException("Can not stop environment because one of the user resources is in " +
-				"status CREATING or STARTING")).when(environmentService).stopEnvironment(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).stopEnvironment(USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEdge() {
-		doNothing().when(environmentService).stopEdge(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/edge")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).stopEdge(USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEdgeWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(environmentService).stopEdge(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/edge")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_FORBIDDEN, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEdgeWithResourceConflictException() {
-		doThrow(new ResourceConflictException("Can not stop edge because its status is CREATING or STARTING"))
-				.when(environmentService).stopEdge(anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/edge")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).stopEdge(USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
 	public void stopNotebook() {
-		doNothing().when(environmentService).stopExploratory(anyString(), anyString());
+		doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName")
+				.target("/environment/stop/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -282,16 +105,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopExploratory(USER, "explName");
+		verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void stopNotebookWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).stopExploratory(anyString(), anyString());
+		doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName")
+				.target("/environment/stop/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -305,9 +128,9 @@
 	@Test
 	public void stopNotebookWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not stop notebook because its status is CREATING or STARTING"))
-				.when(environmentService).stopExploratory(anyString(), anyString());
+				.when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName")
+				.target("/environment/stop/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -315,15 +138,15 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopExploratory(USER, "explName");
+		verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void stopCluster() {
-		doNothing().when(environmentService).stopComputational(anyString(), anyString(), anyString());
+		doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName/compName")
+				.target("/environment/stop/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -331,16 +154,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopComputational(USER, "explName", "compName");
+		verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void stopClusterWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).stopComputational(anyString(), anyString(), anyString());
+		doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName/compName")
+				.target("/environment/stop/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -354,9 +177,9 @@
 	@Test
 	public void stopClusterWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not stop cluster because its status is CREATING or STARTING"))
-				.when(environmentService).stopComputational(anyString(), anyString(), anyString());
+				.when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName/compName")
+				.target("/environment/stop/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -364,15 +187,15 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopComputational(USER, "explName", "compName");
+		verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateNotebook() {
-		doNothing().when(environmentService).terminateExploratory(anyString(), anyString());
+		doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName")
+				.target("/environment/terminate/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -380,16 +203,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateExploratory(USER, "explName");
+		verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateNotebookWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).terminateExploratory(anyString(), anyString());
+		doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName")
+				.target("/environment/terminate/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -403,9 +226,9 @@
 	@Test
 	public void terminateNotebookWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not terminate notebook because its status is CREATING or STARTING"))
-				.when(environmentService).terminateExploratory(anyString(), anyString());
+				.when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName")
+				.target("/environment/terminate/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -413,15 +236,15 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateExploratory(USER, "explName");
+		verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateCluster() {
-		doNothing().when(environmentService).terminateComputational(anyString(), anyString(), anyString());
+		doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName/compName")
+				.target("/environment/terminate/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -429,16 +252,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateComputational(USER, "explName", "compName");
+		verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateClusterWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).terminateComputational(anyString(), anyString(), anyString());
+		doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName/compName")
+				.target("/environment/terminate/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -452,9 +275,9 @@
 	@Test
 	public void terminateClusterWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not terminate cluster because its status is CREATING or STARTING"))
-				.when(environmentService).terminateComputational(anyString(), anyString(), anyString());
+				.when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName/compName")
+				.target("/environment/terminate/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -462,7 +285,7 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateComputational(USER, "explName", "compName");
+		verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 }
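// A minimal sketch, not part of this patch: the reworked tests above imply the
// environment stop/terminate endpoints gained a project path segment and pass
// the authenticated UserInfo through to EnvironmentService. The resource method
// below is an assumption reconstructed from the stopNotebook test alone and
// would live inside the EnvironmentResource class; annotations and parameter
// names are illustrative, not taken from the patch.
//
// import javax.ws.rs.POST;
// import javax.ws.rs.Path;
// import javax.ws.rs.PathParam;
// import javax.ws.rs.core.Response;
// import com.epam.dlab.auth.UserInfo;
// import io.dropwizard.auth.Auth;
@POST
@Path("stop/{projectName}/{exploratoryName}")
public Response stopNotebook(@Auth UserInfo userInfo, String user,
                             @PathParam("projectName") String projectName,
                             @PathParam("exploratoryName") String exploratoryName) {
    // mirrors verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN),
    // USER, "projectName", "explName") in the test above; the unannotated String
    // parameter receives the Entity.text(USER) request body
    environmentService.stopExploratory(userInfo, user, projectName, exploratoryName);
    // Response.ok().build() yields the 200 with no Content-Type the tests assert
    return Response.ok().build();
}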
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
index 0999019..bccfa8b 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
@@ -47,7 +47,14 @@
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class ExploratoryResourceTest extends TestBase {
 
@@ -103,15 +110,34 @@
 
 	@Test
 	public void start() {
+		ExploratoryActionFormDTO exploratoryDTO = getExploratoryActionFormDTO();
 		when(exploratoryService.start(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure_provision/exploratory_environment")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getExploratoryActionFormDTO()));
+				.post(Entity.json(exploratoryDTO));
+
+		assertEquals(HttpStatus.SC_OK, response.getStatus());
+		assertEquals("someUuid", response.readEntity(String.class));
+		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+
+		verify(exploratoryService).start(getUserInfo(), exploratoryDTO.getNotebookInstanceName(),
+				exploratoryDTO.getProjectName());
+
+		verifyNoMoreInteractions(exploratoryService);
+	}
+
+	@Test
+	public void startUnprocessableEntity() {
+		when(exploratoryService.start(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
+		final Response response = resources.getJerseyTest()
+				.target("/infrastructure_provision/exploratory_environment")
+				.request()
+				.header("Authorization", "Bearer " + TOKEN)
+				.post(Entity.json(getEmptyExploratoryActionFormDTO()));
 
 		assertEquals(HttpStatus.SC_UNPROCESSABLE_ENTITY, response.getStatus());
-		assertEquals("{\"errors\":[\"notebookInstanceName may not be empty\"]}", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
 		verifyZeroInteractions(exploratoryService);
@@ -125,7 +151,7 @@
 				.target("/infrastructure_provision/exploratory_environment")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getExploratoryActionFormDTO()));
+				.post(Entity.json(getEmptyExploratoryActionFormDTO()));
 
 		assertEquals(HttpStatus.SC_FORBIDDEN, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
@@ -135,9 +161,9 @@
 
 	@Test
 	public void stop() {
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/stop")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/stop")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -146,16 +172,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).stop(getUserInfo(), "someName");
+		verify(exploratoryService).stop(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void stopWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/stop")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/stop")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -164,16 +190,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).stop(getUserInfo(), "someName");
+		verify(exploratoryService).stop(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void stopWithException() {
 		doThrow(new DlabException("Could not stop exploratory environment"))
-				.when(exploratoryService).stop(any(UserInfo.class), anyString());
+				.when(exploratoryService).stop(any(UserInfo.class), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/stop")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/stop")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -185,15 +211,15 @@
 		assertTrue(actualJson.contains(expectedJson));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).stop(getUserInfo(), "someName");
+		verify(exploratoryService).stop(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void terminate() {
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/terminate")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -202,16 +228,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).terminate(getUserInfo(), "someName");
+		verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void terminateWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/terminate")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -220,16 +246,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).terminate(getUserInfo(), "someName");
+		verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void terminateWithException() {
 		doThrow(new DlabException("Could not terminate exploratory environment"))
-				.when(exploratoryService).terminate(any(UserInfo.class), anyString());
+				.when(exploratoryService).terminate(any(UserInfo.class), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/terminate")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -241,22 +267,22 @@
 		assertTrue(actualJson.contains(expectedJson));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).terminate(getUserInfo(), "someName");
+		verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void updateSparkConfig() {
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/reconfigure")
+				.target("/infrastructure_provision/exploratory_environment/someProject/someName/reconfigure")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.put(Entity.json(Collections.singletonList(new ClusterConfig())));
 
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 
-		verify(exploratoryService).updateClusterConfig(refEq(getUserInfo()), eq("someName"),
-				eq(Collections.singletonList(new ClusterConfig())));
+		verify(exploratoryService).updateClusterConfig(refEq(getUserInfo()), eq("someProject"),
+				eq("someName"), eq(Collections.singletonList(new ClusterConfig())));
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
@@ -264,9 +290,9 @@
 	public void getSparkConfig() {
 		final ClusterConfig config = new ClusterConfig();
 		config.setClassification("test");
-		when(exploratoryService.getClusterConfig(any(UserInfo.class), anyString())).thenReturn(Collections.singletonList(config));
+		when(exploratoryService.getClusterConfig(any(UserInfo.class), anyString(), anyString())).thenReturn(Collections.singletonList(config));
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/cluster/config")
+				.target("/infrastructure_provision/exploratory_environment/someProject/someName/cluster/config")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.get();
@@ -277,7 +303,7 @@
 		assertEquals(1, clusterConfigs.size());
 		assertEquals("test", clusterConfigs.get(0).getClassification());
 
-		verify(exploratoryService).getClusterConfig(refEq(getUserInfo()), eq("someName"));
+		verify(exploratoryService).getClusterConfig(refEq(getUserInfo()), eq("someProject"), eq("someName"));
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
@@ -294,10 +320,14 @@
 		return ecfDto;
 	}
 
-	private ExploratoryActionFormDTO getExploratoryActionFormDTO() {
+	private ExploratoryActionFormDTO getEmptyExploratoryActionFormDTO() {
 		return new ExploratoryActionFormDTO();
 	}
 
+	private ExploratoryActionFormDTO getExploratoryActionFormDTO() {
+		return new ExploratoryActionFormDTO("notebook_instance_name", "project_name");
+	}
+
 	private Exploratory getExploratory(@Valid @NotNull ExploratoryCreateFormDTO formDTO) {
 		return Exploratory.builder()
 				.name(formDTO.getName())
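// A minimal sketch, not part of this patch: the updated stubs and verifications
// above suggest ExploratoryService's lifecycle methods now carry the project
// name. The interface shape below is consistent with those tests; it is an
// assumption for illustration, and package imports for ClusterConfig are
// elided because they are not visible in this hunk.
//
// import java.util.List;
// import com.epam.dlab.auth.UserInfo;
public interface ExploratoryService {
    // verify(exploratoryService).start(getUserInfo(), notebookInstanceName, projectName)
    String start(UserInfo userInfo, String exploratoryName, String project);
    // verify(exploratoryService).stop(getUserInfo(), "project", "someName")
    String stop(UserInfo userInfo, String project, String exploratoryName);
    String terminate(UserInfo userInfo, String project, String exploratoryName);
    void updateClusterConfig(UserInfo userInfo, String project, String exploratoryName,
                             List<ClusterConfig> config);
    List<ClusterConfig> getClusterConfig(UserInfo userInfo, String project, String exploratoryName);
}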
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
index be35816..089b308 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
@@ -45,10 +45,16 @@
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class ImageExploratoryResourceTest extends TestBase {
-
+	private static final String PROJECT = "projectName";
 	private ImageExploratoryService imageExploratoryService = mock(ImageExploratoryService.class);
 	private RequestId requestId = mock(RequestId.class);
 
@@ -63,7 +69,7 @@
 
 	@Test
 	public void createImage() {
-		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString()))
+		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString()))
 				.thenReturn("someUuid");
 		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
@@ -75,7 +81,7 @@
 		assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
+		verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName",
 				"someImageName", "someDescription");
 		verify(requestId).put(USER.toLowerCase(), "someUuid");
 		verifyNoMoreInteractions(imageExploratoryService, requestId);
@@ -84,7 +90,7 @@
 	@Test
 	public void createImageWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString()))
+		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString()))
 				.thenReturn("someUuid");
 		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
@@ -96,8 +102,7 @@
 		assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
-				"someImageName", "someDescription");
+		verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName", "someImageName", "someDescription");
 		verify(requestId).put(USER.toLowerCase(), "someUuid");
 		verifyNoMoreInteractions(imageExploratoryService, requestId);
 	}
@@ -105,7 +110,7 @@
 	@Test
 	public void createImageWithException() {
 		doThrow(new ResourceAlreadyExistException("Image with name is already exist"))
-				.when(imageExploratoryService).createImage(any(UserInfo.class), anyString(), anyString(), anyString());
+				.when(imageExploratoryService).createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure_provision/exploratory_environment/image")
 				.request()
@@ -115,8 +120,7 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
-				"someImageName", "someDescription");
+		verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName", "someImageName", "someDescription");
 		verifyNoMoreInteractions(imageExploratoryService);
 		verifyZeroInteractions(requestId);
 	}
@@ -168,10 +172,12 @@
 
 	@Test
 	public void getImage() {
-		when(imageExploratoryService.getImage(anyString(), anyString()))
+		when(imageExploratoryService.getImage(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(getImageList().get(0));
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure_provision/exploratory_environment/image/someName")
+				.queryParam("project", "someProject")
+				.queryParam("endpoint", "someEndpoint")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.get();
@@ -180,17 +186,19 @@
 		assertEquals(getImageList().get(0), response.readEntity(ImageInfoRecord.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).getImage(USER.toLowerCase(), "someName");
+		verify(imageExploratoryService).getImage(USER.toLowerCase(), "someName", "someProject", "someEndpoint");
 		verifyNoMoreInteractions(imageExploratoryService);
 	}
 
 	@Test
 	public void getImageWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(imageExploratoryService.getImage(anyString(), anyString()))
+		when(imageExploratoryService.getImage(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(getImageList().get(0));
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure_provision/exploratory_environment/image/someName")
+				.queryParam("project", "someProject")
+				.queryParam("endpoint", "someEndpoint")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.get();
@@ -199,7 +207,41 @@
 		assertEquals(getImageList().get(0), response.readEntity(ImageInfoRecord.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).getImage(USER.toLowerCase(), "someName");
+		verify(imageExploratoryService).getImage(USER.toLowerCase(), "someName", "someProject", "someEndpoint");
+		verifyNoMoreInteractions(imageExploratoryService);
+	}
+
+	@Test
+	public void getAllImagesForProject() {
+		when(imageExploratoryService.getImagesForProject(anyString())).thenReturn(getImageList());
+		final Response response = resources.getJerseyTest()
+				.target("/infrastructure_provision/exploratory_environment/image/all")
+				.queryParam("project", "someProject")
+				.request()
+				.header("Authorization", "Bearer " + TOKEN)
+				.get();
+
+		assertEquals(HttpStatus.SC_OK, response.getStatus());
+		assertEquals(getImageList(), response.readEntity(new GenericType<List<ImageInfoRecord>>() {}));
+		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+
+		verify(imageExploratoryService).getImagesForProject("someProject");
+		verifyNoMoreInteractions(imageExploratoryService);
+	}
+
+	@Test
+	public void getAllImagesForNullProject() {
+		when(imageExploratoryService.getImagesForProject(anyString())).thenReturn(getImageList());
+		final Response response = resources.getJerseyTest()
+				.target("/infrastructure_provision/exploratory_environment/image/all")
+				.request()
+				.header("Authorization", "Bearer " + TOKEN)
+				.get();
+
+		assertEquals(HttpStatus.SC_BAD_REQUEST, response.getStatus());
+		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+
+		verify(imageExploratoryService, never()).getImagesForProject(anyString());
 		verifyNoMoreInteractions(imageExploratoryService);
 	}
 
@@ -240,9 +282,11 @@
 	@Test
 	public void getImageWithException() {
 		doThrow(new ResourceNotFoundException("Image with name was not found for user"))
-				.when(imageExploratoryService).getImage(anyString(), anyString());
+				.when(imageExploratoryService).getImage(anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure_provision/exploratory_environment/image/someName")
+				.queryParam("project", "someProject")
+				.queryParam("endpoint", "someEndpoint")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.get();
@@ -250,18 +294,19 @@
 		assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).getImage(USER.toLowerCase(), "someName");
+		verify(imageExploratoryService).getImage(USER.toLowerCase(), "someName", "someProject", "someEndpoint");
 		verifyNoMoreInteractions(imageExploratoryService);
 	}
 
 	private ExploratoryImageCreateFormDTO getExploratoryImageCreateFormDTO() {
 		ExploratoryImageCreateFormDTO eicfDto = new ExploratoryImageCreateFormDTO("someImageName", "someDescription");
 		eicfDto.setNotebookName("someNotebookName");
+		eicfDto.setProjectName(PROJECT);
 		return eicfDto;
 	}
 
 	private List<ImageInfoRecord> getImageList() {
-		ImageInfoRecord imageInfoRecord = new ImageInfoRecord("someName", "someDescription", "someProject", "someEndpoint", "someApp",
+		ImageInfoRecord imageInfoRecord = new ImageInfoRecord("someName", "someDescription", "someProject", "someEndpoint", "someUser", "someApp",
 				"someFullName", ImageStatus.CREATED);
 		return Collections.singletonList(imageInfoRecord);
 	}
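// A minimal sketch, not part of this patch: getAllImagesForProject and
// getAllImagesForNullProject above expect 200 with a JSON list when "project"
// is supplied and 400 when it is absent, which is the behavior of a
// bean-validated required query parameter under Dropwizard/JAX-RS. The method
// below is an assumed shape consistent with those tests, inside the image
// resource class; names and annotations are illustrative.
//
// import javax.ws.rs.GET;
// import javax.ws.rs.Path;
// import javax.ws.rs.Produces;
// import javax.ws.rs.QueryParam;
// import javax.ws.rs.core.MediaType;
// import javax.ws.rs.core.Response;
// import org.hibernate.validator.constraints.NotBlank;
// import io.dropwizard.auth.Auth;
// import com.epam.dlab.auth.UserInfo;
@GET
@Path("image/all")
@Produces(MediaType.APPLICATION_JSON)
public Response getImagesForProject(@Auth UserInfo ui,
                                    @NotBlank @QueryParam("project") String project) {
    // a missing/blank project fails validation before this body runs, giving the 400
    return Response.ok(imageExploratoryService.getImagesForProject(project)).build();
}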
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java
index b92335e..0f63cb9 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java
@@ -37,7 +37,17 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class InfrastructureInfoResourceTest extends TestBase {
 
@@ -84,7 +94,7 @@
 	@Test
 	public void healthStatus() {
 		HealthStatusPageDTO hspDto = getHealthStatusPageDTO();
-		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean())).thenReturn(hspDto);
+		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean())).thenReturn(hspDto);
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.queryParam("full", "1")
@@ -96,7 +106,7 @@
 		assertEquals(hspDto.getStatus(), response.readEntity(HealthStatusPageDTO.class).getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
@@ -104,7 +114,7 @@
 	public void healthStatusWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
 		HealthStatusPageDTO hspDto = getHealthStatusPageDTO();
-		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean())).thenReturn(hspDto);
+		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean())).thenReturn(hspDto);
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.queryParam("full", "1")
@@ -116,14 +126,14 @@
 		assertEquals(hspDto.getStatus(), response.readEntity(HealthStatusPageDTO.class).getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
 	@Test
 	public void healthStatusWithDefaultQueryParam() {
 		HealthStatusPageDTO hspDto = getHealthStatusPageDTO();
-		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean())).thenReturn(hspDto);
+		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean())).thenReturn(hspDto);
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.request()
@@ -134,14 +144,14 @@
 		assertEquals(hspDto.getStatus(), response.readEntity(HealthStatusPageDTO.class).getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
 	@Test
 	public void healthStatusWithException() {
 		doThrow(new DlabException("Could not return status of resources for user"))
-				.when(infrastructureInfoService).getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean());
+				.when(infrastructureInfoService).getHeathStatus(any(UserInfo.class), anyBoolean());
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.request()
@@ -151,7 +161,7 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
@@ -159,7 +169,7 @@
 	@Test
 	public void getUserResourcesWithException() {
 		doThrow(new DlabException("Could not load list of provisioned resources for user"))
-				.when(infrastructureInfoService).getUserResources(anyString());
+				.when(infrastructureInfoService).getUserResources(any(UserInfo.class));
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/info")
 				.request()
@@ -169,7 +179,7 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getUserResources(USER.toLowerCase());
+		verify(infrastructureInfoService).getUserResources(any());
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
@@ -191,8 +201,8 @@
 	}
 
 	private HealthStatusPageDTO getHealthStatusPageDTO() {
-		HealthStatusPageDTO hspdto = new HealthStatusPageDTO();
-		hspdto.setStatus("someStatus");
-		return hspdto;
+		return HealthStatusPageDTO.builder()
+				.status("someStatus")
+				.build();
 	}
 }
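// A minimal sketch, not part of this patch: the health-status stubs above
// dropped their third boolean, and getUserResources now receives the caller's
// UserInfo rather than a lowercased user-name string. A service shape
// consistent with those tests follows; the return type of getUserResources is
// not visible in this hunk, so Object stands in purely as a placeholder.
//
// import com.epam.dlab.auth.UserInfo;
public interface InfrastructureInfoService {
    HealthStatusPageDTO getHeathStatus(UserInfo userInfo, boolean fullReport);
    Object getUserResources(UserInfo userInfo);
}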
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/KeyUploaderResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/KeyUploaderResourceTest.java
deleted file mode 100644
index 1628cba..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/KeyUploaderResourceTest.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.resources;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.service.AccessKeyService;
-import com.epam.dlab.dto.keyload.KeyLoadStatus;
-import com.epam.dlab.exceptions.DlabException;
-import io.dropwizard.auth.AuthenticationException;
-import io.dropwizard.testing.junit.ResourceTestRule;
-import org.apache.http.HttpStatus;
-import org.glassfish.jersey.media.multipart.FormDataMultiPart;
-import org.glassfish.jersey.media.multipart.MultiPartFeature;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
-
-public class KeyUploaderResourceTest extends TestBase {
-
-	private AccessKeyService keyService = mock(AccessKeyService.class);
-
-	@Rule
-	public final ResourceTestRule resources = getResourceTestRuleInstance(new KeyUploaderResource(keyService));
-
-	@Before
-	public void setup() throws AuthenticationException {
-		authSetup();
-	}
-
-	@Test
-	public void checkKey() {
-		when(keyService.getUserKeyStatus(anyString())).thenReturn(KeyLoadStatus.SUCCESS);
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).getUserKeyStatus(USER.toLowerCase());
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void checkKeyWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(keyService.getUserKeyStatus(anyString())).thenReturn(KeyLoadStatus.SUCCESS);
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).getUserKeyStatus(USER.toLowerCase());
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void checkKeyWithErrorStatus() {
-		when(keyService.getUserKeyStatus(anyString())).thenReturn(KeyLoadStatus.ERROR);
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).getUserKeyStatus(USER.toLowerCase());
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void loadKey() {
-		when(keyService.uploadKey(any(UserInfo.class), anyString(), anyBoolean())).thenReturn("someUuid");
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "ssh-h;glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).uploadKey(getUserInfo(), "ssh-h;glfh;lgfmhgfmmgfkl", true);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void loadKeyWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(keyService.uploadKey(any(UserInfo.class), anyString(), anyBoolean())).thenReturn("someUuid");
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "ssh-h;glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).uploadKey(getUserInfo(), "ssh-h;glfh;lgfmhgfmmgfkl", true);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void loadKeyWithWrongKeyFormat() {
-		when(keyService.uploadKey(any(UserInfo.class), anyString(), anyBoolean())).thenReturn("someUuid");
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(keyService);
-	}
-
-	@Test
-	public void loadKeyWithException() {
-		doThrow(new DlabException("Could not upload the key and create EDGE node"))
-				.when(keyService).uploadKey(any(UserInfo.class), anyString(), anyBoolean());
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "ssh-h;glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).uploadKey(getUserInfo(), "ssh-h;glfh;lgfmhgfmmgfkl", true);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void reuploadKey() {
-		when(keyService.uploadKey(any(UserInfo.class), anyString(), anyBoolean())).thenReturn("someUuid");
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "ssh-h;glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.queryParam("is_primary_uploading", "false")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).uploadKey(getUserInfo(), "ssh-h;glfh;lgfmhgfmmgfkl", false);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void reuploadKeyWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(keyService.uploadKey(any(UserInfo.class), anyString(), anyBoolean())).thenReturn("someUuid");
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "ssh-h;glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.queryParam("is_primary_uploading", "false")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).uploadKey(getUserInfo(), "ssh-h;glfh;lgfmhgfmmgfkl", false);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void reuploadKeyWithWrongKeyFormat() {
-		when(keyService.uploadKey(any(UserInfo.class), anyString(), anyBoolean())).thenReturn("someUuid");
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.queryParam("is_primary_uploading", "false")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(keyService);
-	}
-
-	@Test
-	public void reuploadKeyWithException() {
-		doThrow(new DlabException("Could not reupload the key. Previous key has been deleted"))
-				.when(keyService).uploadKey(any(UserInfo.class), anyString(), anyBoolean());
-
-		FormDataMultiPart multiPart = new FormDataMultiPart()
-				.field("file", "ssh-h;glfh;lgfmhgfmmgfkl");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key")
-				.queryParam("is_primary_uploading", "false")
-				.register(MultiPartFeature.class)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.entity(multiPart, multiPart.getMediaType()));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).uploadKey(getUserInfo(), "ssh-h;glfh;lgfmhgfmmgfkl", false);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void recoverEdge() {
-		when(keyService.recoverEdge(any(UserInfo.class))).thenReturn("someUuid");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key/recover")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(""));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).recoverEdge(getUserInfo());
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void recoverEdgeWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(keyService.recoverEdge(any(UserInfo.class))).thenReturn("someUuid");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key/recover")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(""));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).recoverEdge(getUserInfo());
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void recoverEdgeWithException() {
-		doThrow(new DlabException("Could not upload the key and create EDGE node"))
-				.when(keyService).recoverEdge(any(UserInfo.class));
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key/recover")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(""));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).recoverEdge(getUserInfo());
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void generateKey() {
-		when(keyService.generateKey(any(UserInfo.class), anyBoolean())).thenReturn("someUuid");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key/generate")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(""));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_OCTET_STREAM, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).generateKey(getUserInfo(), true);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void generateKeyWithoutEdgeCreation() {
-		when(keyService.generateKey(any(UserInfo.class), anyBoolean())).thenReturn("someUuid");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key/generate")
-				.queryParam("is_primary_uploading", "false")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(""));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_OCTET_STREAM, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).generateKey(getUserInfo(), false);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void generateKeyWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(keyService.generateKey(any(UserInfo.class), anyBoolean())).thenReturn("someUuid");
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key/generate")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(""));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_OCTET_STREAM, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).generateKey(getUserInfo(), true);
-		verifyNoMoreInteractions(keyService);
-	}
-
-	@Test
-	public void generateKeyWithException() {
-		doThrow(new DlabException("Can not generate private/public key pair due to"))
-				.when(keyService).generateKey(any(UserInfo.class), anyBoolean());
-
-		final Response response = resources.getJerseyTest()
-				.target("/user/access_key/generate")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(""));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(keyService).generateKey(getUserInfo(), true);
-		verifyNoMoreInteractions(keyService);
-	}
-
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
index 50f6763..c7f5ced 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
@@ -22,7 +22,12 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.dto.*;
+import com.epam.dlab.backendapi.resources.dto.LibInfoRecord;
+import com.epam.dlab.backendapi.resources.dto.LibInstallFormDTO;
+import com.epam.dlab.backendapi.resources.dto.LibKey;
+import com.epam.dlab.backendapi.resources.dto.LibraryDTO;
+import com.epam.dlab.backendapi.resources.dto.LibraryStatus;
+import com.epam.dlab.backendapi.resources.dto.SearchLibsFormDTO;
 import com.epam.dlab.backendapi.service.ExternalLibraryService;
 import com.epam.dlab.backendapi.service.LibraryService;
 import com.epam.dlab.dto.UserInstanceDTO;
@@ -50,331 +55,355 @@
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class LibExploratoryResourceTest extends TestBase {
 
-	private static final String LIB_GROUP = "group";
-	private static final String LIB_NAME = "name";
-	private static final String LIB_VERSION = "version";
-	private static final String EXPLORATORY_NAME = "explName";
-	private static final String COMPUTATIONAL_NAME = "compName";
-	private static final String UUID = "uid";
-	private ExploratoryDAO exploratoryDAO = mock(ExploratoryDAO.class);
-	private LibraryService libraryService = mock(LibraryService.class);
-	private RESTService provisioningService = mock(RESTService.class);
-	private ExternalLibraryService externalLibraryService = mock(ExternalLibraryService.class);
-	private RequestId requestId = mock(RequestId.class);
+    private static final String LIB_GROUP = "group";
+    private static final String LIB_NAME = "name";
+    private static final String LIB_VERSION = "version";
+    private static final String EXPLORATORY_NAME = "explName";
+    private static final String PROJECT = "projectName";
+    private static final String COMPUTATIONAL_NAME = "compName";
+    private static final String UUID = "uid";
+    private ExploratoryDAO exploratoryDAO = mock(ExploratoryDAO.class);
+    private LibraryService libraryService = mock(LibraryService.class);
+    private RESTService provisioningService = mock(RESTService.class);
+    private ExternalLibraryService externalLibraryService = mock(ExternalLibraryService.class);
+    private RequestId requestId = mock(RequestId.class);
 
-	@Rule
-	public final ResourceTestRule resources = getResourceTestRuleInstance(
-			new LibExploratoryResource(exploratoryDAO, libraryService, externalLibraryService));
+    @Rule
+    public final ResourceTestRule resources = getResourceTestRuleInstance(
+            new LibExploratoryResource(exploratoryDAO, libraryService, externalLibraryService));
 
-	@Before
-	public void setup() throws AuthenticationException {
-		authSetup();
-	}
+    @Before
+    public void setup() throws AuthenticationException {
+        authSetup();
+    }
 
-	@Test
-	public void getLibGroupListWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn
-				(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+    @Test
+    public void getLibGroupListWithFailedAuth() throws AuthenticationException {
+        authFailSetup();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibGroupListWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn
-				(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibGroupListWithoutComputationalWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibGroupListWithoutComputationalWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibList() {
-		when(libraryService.getLibs(anyString(), anyString(), anyString())).thenReturn(getDocuments());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(libraryService.getLibs(anyString(), anyString(), anyString(), anyString())).thenReturn(getDocuments());
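+        // getLibs now takes the project name as well: user, project, exploratory, computational.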
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
-		}));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
+        }));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(libraryService.getLibs(anyString(), anyString(), anyString())).thenReturn(getDocuments());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(libraryService.getLibs(anyString(), anyString(), anyString(), anyString())).thenReturn(getDocuments());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
-		}));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
+        }));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListWithException() {
-		doThrow(new DlabException("Cannot load installed libraries"))
-				.when(libraryService).getLibs(anyString(), anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new DlabException("Cannot load installed libraries"))
+                .when(libraryService).getLibs(anyString(), anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListFormatted() {
-		when(libraryService.getLibInfo(anyString(), anyString())).thenReturn(getLibInfoRecords());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(libraryService.getLibInfo(anyString(), anyString(), anyString())).thenReturn(getLibInfoRecords());
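+        // getLibInfo is likewise project-scoped: user, project, exploratory.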
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListFormattedWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(libraryService.getLibInfo(anyString(), anyString())).thenReturn(getLibInfoRecords());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(libraryService.getLibInfo(anyString(), anyString(), anyString())).thenReturn(getLibInfoRecords());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListFormattedWithException() {
-		doThrow(new DlabException("Cannot load  formatted list of installed libraries"))
-				.when(libraryService).getLibInfo(anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new DlabException("Cannot load  formatted list of installed libraries"))
+                .when(libraryService).getLibInfo(anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void libInstall() {
-		when(libraryService.installComputationalLibs(any(UserInfo.class), anyString(), anyString(),
-				anyListOf(LibInstallDTO.class))).thenReturn(UUID);
-		LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
-		libInstallFormDTO.setComputationalName(COMPUTATIONAL_NAME);
-		libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
-		libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_install")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(libInstallFormDTO));
+        when(libraryService.installComputationalLibs(any(UserInfo.class), anyString(), anyString(),
+                anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
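+        // installComputationalLibs now receives the project right after the UserInfo,
+        // and the form below carries it via setProject.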
+        LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
+        libInstallFormDTO.setComputationalName(COMPUTATIONAL_NAME);
+        libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
+        libInstallFormDTO.setProject(PROJECT);
+        libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_install")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(libInstallFormDTO));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-		assertEquals(UUID, response.readEntity(String.class));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(UUID, response.readEntity(String.class));
 
-		verify(libraryService).installComputationalLibs(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
-				eq(COMPUTATIONAL_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
-		verifyNoMoreInteractions(libraryService);
-		verifyZeroInteractions(provisioningService, requestId);
-	}
+        verify(libraryService).installComputationalLibs(refEq(getUserInfo()), eq(PROJECT),
+                eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
+        verifyNoMoreInteractions(libraryService);
+        verifyZeroInteractions(provisioningService, requestId);
+    }
 
 
 	@Test
 	public void libInstallWithoutComputational() {
-		when(libraryService.installExploratoryLibs(any(UserInfo.class), anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
-		LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
-		libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
-		libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_install")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(libInstallFormDTO));
+        when(libraryService.installExploratoryLibs(any(UserInfo.class), anyString(), anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
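+        // installExploratoryLibs follows the same pattern: UserInfo, project, exploratory, libs.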
+        LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
+        libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
+        libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
+        libInstallFormDTO.setProject(PROJECT);
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_install")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(libInstallFormDTO));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-		assertEquals(UUID, response.readEntity(String.class));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(UUID, response.readEntity(String.class));
 
-		verify(libraryService).installExploratoryLibs(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
-				eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
-		verifyNoMoreInteractions(libraryService);
-		verifyZeroInteractions(provisioningService, requestId);
-	}
+        verify(libraryService).installExploratoryLibs(refEq(getUserInfo()), eq(PROJECT),
+                eq(EXPLORATORY_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
+        verifyNoMoreInteractions(libraryService);
+        verifyZeroInteractions(provisioningService, requestId);
+    }
 
 	@Test
 	public void getLibraryListWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
-				.thenReturn(getUserInstanceDto());
-		SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
-		searchLibsFormDTO.setComputationalName("compName");
-		searchLibsFormDTO.setNotebookName("explName");
-		searchLibsFormDTO.setGroup("someGroup");
-		searchLibsFormDTO.setStartWith("someText");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/search/lib_list")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(searchLibsFormDTO));
+        authFailSetup();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+        searchLibsFormDTO.setComputationalName("compName");
+        searchLibsFormDTO.setNotebookName("explName");
+        searchLibsFormDTO.setGroup("someGroup");
+        searchLibsFormDTO.setStartWith("someText");
+        searchLibsFormDTO.setProjectName("projectName");
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(searchLibsFormDTO));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibraryListWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
-				.thenReturn(getUserInstanceDto());
-		SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
-		searchLibsFormDTO.setComputationalName("compName");
-		searchLibsFormDTO.setNotebookName("explName");
-		searchLibsFormDTO.setGroup("someGroup");
-		searchLibsFormDTO.setStartWith("someText");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/search/lib_list")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(searchLibsFormDTO));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+        searchLibsFormDTO.setComputationalName("compName");
+        searchLibsFormDTO.setNotebookName("explName");
+        searchLibsFormDTO.setGroup("someGroup");
+        searchLibsFormDTO.setStartWith("someText");
+        searchLibsFormDTO.setProjectName("projectName");
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(searchLibsFormDTO));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibraryListWithoutComputationalWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString()))
-				.thenReturn(getUserInstanceDto());
-		SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
-		searchLibsFormDTO.setComputationalName("");
-		searchLibsFormDTO.setNotebookName("explName");
-		searchLibsFormDTO.setGroup("someGroup");
-		searchLibsFormDTO.setStartWith("someText");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/search/lib_list")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(searchLibsFormDTO));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+        searchLibsFormDTO.setComputationalName("");
+        searchLibsFormDTO.setNotebookName("explName");
+        searchLibsFormDTO.setGroup("someGroup");
+        searchLibsFormDTO.setStartWith("someText");
+        searchLibsFormDTO.setProjectName("projectName");
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(searchLibsFormDTO));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getMavenArtifact() {
@@ -422,11 +451,14 @@
 	}
 
 	private UserInstanceDTO getUserInstanceDto() {
-		UserComputationalResource ucResource = new UserComputationalResource();
-		ucResource.setComputationalName("compName");
-		return new UserInstanceDTO().withUser(USER).withExploratoryName("explName")
-				.withResources(singletonList(ucResource));
-	}
+        UserComputationalResource ucResource = new UserComputationalResource();
+        ucResource.setComputationalName("compName");
+        return new UserInstanceDTO()
+                .withUser(USER)
+                .withExploratoryName("explName")
+                .withProject(PROJECT)
+                .withResources(singletonList(ucResource));
+    }
 
 	private List<Document> getDocuments() {
 		return singletonList(new Document());
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
index 88cef73..1f6fc46 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
@@ -21,6 +21,7 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.KeysDTO;
+import com.epam.dlab.backendapi.resources.dto.ProjectActionFormDTO;
 import com.epam.dlab.backendapi.service.AccessKeyService;
 import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.exceptions.DlabException;
@@ -35,11 +36,18 @@
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+import java.util.Collections;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 public class ProjectResourceTest extends TestBase {
     private ProjectService projectService = mock(ProjectService.class);
@@ -49,36 +57,34 @@
     public final ResourceTestRule resources = getResourceTestRuleInstance(
             new ProjectResource(projectService, keyService));
 
-
     @Before
     public void setup() throws AuthenticationException {
         authSetup();
     }
 
     @Test
-    public void getProjectsForManaging() {
+    public void stopProject() {
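+        // POST project/stop with a JSON action body replaces project/managing/stop/{name};
+        // stopWithResources now also receives a list, presumably the project's endpoints.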
         final Response response = resources.getJerseyTest()
-                .target("project/managing")
+                .target("project/stop")
                 .request()
                 .header("Authorization", "Bearer " + TOKEN)
-                .get();
+                .post(Entity.json(getProjectActionDTO()));
 
-        assertEquals(HttpStatus.SC_OK, response.getStatus());
-        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-        verify(projectService, times(1)).getProjectsForManaging();
+        assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
+        verify(projectService).stopWithResources(any(UserInfo.class), anyList(), anyString());
         verifyNoMoreInteractions(projectService);
     }
 
     @Test
-    public void stopProjectWithResources() {
+    public void startProject() {
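+        // Symmetric to stopProject: POST project/start with the same action payload.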
         final Response response = resources.getJerseyTest()
-                .target("project/managing/stop/" + "projectName")
+                .target("project/start")
                 .request()
                 .header("Authorization", "Bearer " + TOKEN)
-                .post(Entity.json(""));
+                .post(Entity.json(getProjectActionDTO()));
 
         assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
-        verify(projectService, times(1)).stopWithResources(any(UserInfo.class), anyString());
+        verify(projectService).start(any(UserInfo.class), anyList(), anyString());
         verifyNoMoreInteractions(projectService);
     }
 
@@ -117,4 +123,8 @@
         verify(keyService).generateKeys(getUserInfo());
         verifyNoMoreInteractions(keyService);
     }
-}
\ No newline at end of file
+
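+    // Action payload for project/stop and project/start: the project name plus,
+    // presumably, the endpoint URLs it runs on.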
+    private ProjectActionFormDTO getProjectActionDTO() {
+        return new ProjectActionFormDTO("DLAB", Collections.singletonList("https://localhost:8083/"));
+    }
+}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
index 08e601e..c763238 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
@@ -36,7 +36,12 @@
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
 import java.time.temporal.ChronoUnit;
 import java.util.Arrays;
 import java.util.Collections;
@@ -46,7 +51,13 @@
 import static org.junit.Assert.assertNull;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 public class SchedulerJobResourceTest extends TestBase {
 
@@ -63,228 +74,228 @@
 
 	@Test
 	public void updateExploratoryScheduler() {
-		doNothing().when(schedulerJobService)
-				.updateExploratorySchedulerData(anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doNothing().when(schedulerJobService)
+                .updateExploratorySchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
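+        // Scheduler endpoints are now project-scoped: the URL gains a /projectName segment
+        // and the service methods take the project as an extra argument.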
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(),
-				"explName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+                "explName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void updateExploratorySchedulerWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(schedulerJobService)
-				.updateExploratorySchedulerData(anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", String.join(" ", "Bearer", TOKEN))
-				.post(Entity.json(getSchedulerJobDTO()));
+        authFailSetup();
+        doNothing().when(schedulerJobService)
+                .updateExploratorySchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", String.join(" ", "Bearer", TOKEN))
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(),
-				"explName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+                "explName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void updateExploratorySchedulerWithException() {
-		doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
-				.when(schedulerJobService).updateExploratorySchedulerData(anyString(), anyString(),
-				any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
+                .when(schedulerJobService).updateExploratorySchedulerData(anyString(), anyString(), anyString(),
+                any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "explName",
-				getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+                "explName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void upsertComputationalScheduler() {
-		doNothing().when(schedulerJobService)
-				.updateComputationalSchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doNothing().when(schedulerJobService)
+                .updateComputationalSchedulerData(anyString(), anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
-				"compName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+                "explName", "compName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void upsertComputationalSchedulerWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(schedulerJobService)
-				.updateComputationalSchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        authFailSetup();
+        doNothing().when(schedulerJobService)
+                .updateComputationalSchedulerData(anyString(), anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
-				"compName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+                "explName", "compName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void upsertComputationalSchedulerWithException() {
-		doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
-				.when(schedulerJobService).updateComputationalSchedulerData(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
+                .when(schedulerJobService).updateComputationalSchedulerData(anyString(), anyString(), anyString(),
+                anyString(), any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
-				"compName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+                "explName", "compName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratory() {
-		when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
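+        // The fetch call mirrors the project-scoped update: user, project, exploratory.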
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratoryWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratoryWithException() {
-		doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory"))
-				.when(schedulerJobService).fetchSchedulerJobForUserAndExploratory(anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory"))
+                .when(schedulerJobService).fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForComputationalResource() {
-		when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
-				"explName", "compName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+                "explName", "compName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForComputationalResourceWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
-				"explName", "compName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+                "explName", "compName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForComputationalResourceWithException() {
-		doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory with " +
-				"computational resource")).when(schedulerJobService)
-				.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory with " +
+                "computational resource")).when(schedulerJobService)
+                .fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
-				"explName", "compName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+                "explName", "compName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void testGetActiveSchedulers() {
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java
index 1953694..713eda9 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java
@@ -19,14 +19,13 @@
 
 package com.epam.dlab.backendapi.resources;
 
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.resources.dto.GroupDTO;
-import com.epam.dlab.backendapi.resources.dto.UpdateRoleGroupDto;
-import com.epam.dlab.backendapi.resources.dto.UpdateUserGroupDto;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 import com.epam.dlab.backendapi.service.UserGroupService;
 import io.dropwizard.auth.AuthenticationException;
 import io.dropwizard.testing.junit.ResourceTestRule;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.http.HttpStatus;
 import org.junit.Before;
 import org.junit.Rule;
@@ -41,26 +40,31 @@
 import java.util.List;
 import java.util.Set;
 
-import static java.util.Collections.singleton;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class UserGroupResourceTest extends TestBase {
 
-	private static final String USER = "user";
-	private static final String ROLE_ID = "id";
-	private static final String GROUP = "group";
-	private UserGroupService userGroupService = mock(UserGroupService.class);
+    private static final String USER = "user";
+    private static final String ROLE_ID = "id";
+    private static final String GROUP = "group";
+    private UserGroupService userGroupService = mock(UserGroupService.class);
+    private ProjectDAO projectDAO = mock(ProjectDAO.class);
 
-	@Before
-	public void setup() throws AuthenticationException {
-		authSetup();
-	}
+    @Before
+    public void setup() throws AuthenticationException {
+        authSetup();
+    }
 
-	@Rule
-	public final ResourceTestRule resources =
-			getResourceTestRuleInstance(new UserGroupResource(userGroupService));
+    @Rule
+    public final ResourceTestRule resources =
+            getResourceTestRuleInstance(new UserGroupResource(userGroupService));
 
 	@Test
 	public void createGroup() {
@@ -116,77 +120,31 @@
 
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 
-		verify(userGroupService).updateGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+		verify(userGroupService).updateGroup(getUserInfo(), GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
 		verifyNoMoreInteractions(userGroupService);
 	}
 
 	@Test
 	public void getGroups() {
-		when(userGroupService.getAggregatedRolesByGroup()).thenReturn(Collections.singletonList(getUserGroup()));
+        when(userGroupService.getAggregatedRolesByGroup(any(UserInfo.class))).thenReturn(Collections.singletonList(getUserGroup()));
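+        // getAggregatedRolesByGroup now takes the caller's UserInfo, presumably so the
+        // returned groups can be scoped to that user.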
 
-		final Response response = resources.getJerseyTest()
-				.target("/group")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        final Response response = resources.getJerseyTest()
+                .target("/group")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		final List<UserGroupDto> actualRoles = response.readEntity(new GenericType<List<UserGroupDto>>() {
-		});
+        final List<UserGroupDto> actualRoles = response.readEntity(new GenericType<List<UserGroupDto>>() {
+        });
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(GROUP, actualRoles.get(0).getGroup());
-		assertTrue(actualRoles.get(0).getRoles().isEmpty());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(GROUP, actualRoles.get(0).getGroup());
+        assertTrue(actualRoles.get(0).getRoles().isEmpty());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(userGroupService).getAggregatedRolesByGroup();
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void addRolesToGroup() {
-
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateRoleGroupDto(singleton(ROLE_ID), GROUP)));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-		verify(userGroupService).updateRolesForGroup(GROUP, singleton(ROLE_ID));
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void addRolesToGroupWithValidationException() {
-
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateRoleGroupDto(singleton(ROLE_ID), StringUtils.EMPTY)));
-
-		assertEquals(HttpStatus.SC_UNPROCESSABLE_ENTITY, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
-	@Test
-	public void deleteGroupFromRole() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.queryParam("group", GROUP)
-				.queryParam("roleId", ROLE_ID)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-
-		verify(userGroupService).removeGroupFromRole(singleton(GROUP), singleton(ROLE_ID));
-		verifyNoMoreInteractions(userGroupService);
-	}
+        verify(userGroupService).getAggregatedRolesByGroup(getUserInfo());
+        verifyNoMoreInteractions(userGroupService);
+    }
 
 	@Test
 	public void deleteGroup() {
@@ -203,89 +161,15 @@
 		verifyNoMoreInteractions(userGroupService);
 	}
 
-	@Test
-	public void deleteGroupFromRoleWithValidationException() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.queryParam("group", GROUP)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_BAD_REQUEST, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
-	@Test
-	public void addUserToGroup() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateUserGroupDto(GROUP, singleton(USER))));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-		verify(userGroupService).addUsersToGroup(GROUP, singleton(USER));
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void addUserToGroupWithValidationException() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateUserGroupDto(StringUtils.EMPTY, singleton(USER))));
-
-		assertEquals(HttpStatus.SC_UNPROCESSABLE_ENTITY, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
-	@Test
-	public void deleteUserFromGroup() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.queryParam("user", USER)
-				.queryParam("group", GROUP)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-
-		verify(userGroupService).removeUserFromGroup(GROUP, USER);
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void deleteUserFromGroupWithValidationException() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.queryParam("group", GROUP)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_BAD_REQUEST, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
 	private UserGroupDto getUserGroup() {
 		return new UserGroupDto(GROUP, Collections.emptyList(), Collections.emptySet());
-	}
+    }
 
-	private GroupDTO getCreateGroupDto(String group, Set<String> roleIds) {
-		final GroupDTO dto = new GroupDTO();
-		dto.setName(group);
-		dto.setRoleIds(roleIds);
-		dto.setUsers(Collections.singleton(USER));
-		return dto;
-	}
-
-
+    private GroupDTO getCreateGroupDto(String group, Set<String> roleIds) {
+        final GroupDTO dto = new GroupDTO();
+        dto.setName(group);
+        dto.setRoleIds(roleIds);
+        dto.setUsers(Collections.singleton(USER));
+        return dto;
+    }
 }
\ No newline at end of file
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java
index 6c0f5be..c4e2bd6 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java
@@ -35,9 +35,12 @@
 import java.util.Collections;
 import java.util.List;
 
-import static java.util.Collections.singleton;
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 public class UserRoleResourceTest extends TestBase {
 
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java
index 83fab66..883630c 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java
@@ -19,6 +19,7 @@
 package com.epam.dlab.backendapi.service;
 
 import com.epam.dlab.backendapi.dao.UserRoleDao;
+import com.epam.dlab.backendapi.resources.TestBase;
 import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import org.junit.Rule;
@@ -29,37 +30,24 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
-public class UserRoleServiceImplTest {
+public class UserRoleServiceImplTest extends TestBase {
 
-	private static final String ROLE_ID = "roleId";
-	@Mock
-	private UserRoleDao dao;
-	@InjectMocks
-	private UserRoleServiceImpl userRoleService;
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
+    private static final String ROLE_ID = "roleId";
+    @Mock
+    private UserRoleDao dao;
+    @InjectMocks
+    private UserRoleServiceImpl userRoleService;
+    @Rule
+    public ExpectedException expectedException = ExpectedException.none();
 
-	@Test
-	public void getUserRoles() {
-		when(dao.findAll()).thenReturn(Collections.singletonList(getUserRole()));
-		final List<UserRoleDto> roles = userRoleService.getUserRoles();
-
-		assertEquals(1, roles.size());
-		assertEquals(ROLE_ID, roles.get(0).getId());
-
-		verify(dao).findAll();
-		verifyNoMoreInteractions(dao);
-	}
-
-
-	@Test
+    @Test
 	public void createRole() {
 
 		userRoleService.createRole(getUserRole());
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsBillingServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsBillingServiceTest.java
deleted file mode 100644
index e24e009..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsBillingServiceTest.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.aws.AwsBillingFilter;
-import com.epam.dlab.exceptions.DlabException;
-import org.bson.Document;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import java.text.ParseException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
-@RunWith(MockitoJUnitRunner.class)
-public class AwsBillingServiceTest {
-
-	private UserInfo userInfo;
-	private AwsBillingFilter billingFilter;
-	private Document basicDocument;
-
-	@Mock
-	private AwsBillingDAO billingDAO;
-
-	@InjectMocks
-	private AwsBillingService awsBillingService;
-
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
-
-	@Before
-	public void setUp() {
-		userInfo = getUserInfo();
-		billingFilter = new AwsBillingFilter();
-		basicDocument = getBasicDocument();
-	}
-
-	@Test
-	public void getReportWithTheSameInstanceOfDocument() {
-		Document expectedDocument = new Document();
-		when(billingDAO.getReport(any(UserInfo.class), any(AwsBillingFilter.class))).thenReturn(expectedDocument);
-
-		Document actualDocument = awsBillingService.getReport(userInfo, billingFilter);
-		assertEquals(expectedDocument, actualDocument);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void getReportWithAnotherInstanceOfDocument() {
-		Document expectedDocument = new Document().append("someField", "someValue");
-		Document anotherDocument = new Document().append("someField", "anotherValue");
-		when(billingDAO.getReport(any(UserInfo.class), any(AwsBillingFilter.class))).thenReturn(anotherDocument);
-
-		Document actualDocument = awsBillingService.getReport(userInfo, billingFilter);
-		assertNotEquals(expectedDocument, actualDocument);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void getReportWithException() {
-		doThrow(new RuntimeException()).when(billingDAO).getReport(any(UserInfo.class), any(AwsBillingFilter.class));
-
-		try {
-			awsBillingService.getReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot load billing report: null", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReport() {
-		when(billingDAO.getReport(any(UserInfo.class), any(AwsBillingFilter.class))).thenReturn(basicDocument);
-
-		byte[] result = awsBillingService.downloadReport(userInfo, billingFilter);
-		assertNotNull(result);
-		assertTrue(result.length > 0);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWithInappropriateDateFormatInDocument() {
-		basicDocument.put("from", "someDateStart");
-		when(billingDAO.getReport(any(UserInfo.class), any(AwsBillingFilter.class))).thenReturn(basicDocument);
-
-		try {
-			awsBillingService.downloadReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot prepare CSV file", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWhenDocumentHasNotAllRequiredFields() {
-		basicDocument.remove("lines");
-		when(billingDAO.getReport(any(UserInfo.class), any(AwsBillingFilter.class))).thenReturn(basicDocument);
-
-		expectedException.expect(NullPointerException.class);
-
-		awsBillingService.downloadReport(userInfo, billingFilter);
-	}
-
-	@Test
-	public void getReportFileName() {
-		String result = awsBillingService.getReportFileName(userInfo, billingFilter);
-		assertEquals("aws-billing-report.csv", result);
-	}
-
-	@Test
-	public void getFirstLine() throws ParseException {
-		String result = awsBillingService.getFirstLine(basicDocument);
-		assertEquals("Service base name: someSBN  Resource tag ID: someTagResourceId  Available reporting " +
-				"period from: Mar 21, 2018 to: Mar 22, 2018", result);
-	}
-
-	@Test
-	public void getFirstLineWithException() throws ParseException {
-		basicDocument.put("from", "someStartDate");
-
-		expectedException.expect(ParseException.class);
-		expectedException.expectMessage("Unparseable date: \"someStartDate\"");
-
-		awsBillingService.getFirstLine(basicDocument);
-
-	}
-
-	@Test
-	public void getHeadersList() {
-		List<String> expectedResult1 =
-				Arrays.asList("USER", "ENVIRONMENT NAME", "RESOURCE TYPE", "SHAPE", "SERVICE", "SERVICE CHARGES");
-		List<String> expectedResult2 = expectedResult1.subList(1, expectedResult1.size());
-
-		List<String> actualResult1 = awsBillingService.getHeadersList(true);
-		assertEquals(expectedResult1, actualResult1);
-
-		List<String> actualResult2 = awsBillingService.getHeadersList(false);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getLine() {
-		String expectedResult1 = "someUser,someId,someResType,someShape,someProduct,someCost someCode\n";
-		String actualResult1 = awsBillingService.getLine(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		basicDocument.remove("user");
-		String expectedResult2 = "someId,someResType,someShape,someProduct,someCost someCode\n";
-		String actualResult2 = awsBillingService.getLine(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getTotal() {
-		String expectedResult1 = ",,,,,Total: someCostTotal someCode\n";
-		String actualResult1 = awsBillingService.getTotal(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		String expectedResult2 = ",,,,Total: someCostTotal someCode\n";
-		String actualResult2 = awsBillingService.getTotal(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	private UserInfo getUserInfo() {
-		return new UserInfo("user", "token");
-	}
-
-	private Document getBasicDocument() {
-		return new Document().append("service_base_name", "someSBN").append("user", "someUser")
-				.append("dlab_id", "someId").append("dlab_resource_type", "someResType")
-				.append("tag_resource_id", "someTagResourceId").append("from", "2018-03-21")
-				.append("to", "2018-03-22").append("full_report", false)
-				.append("shape", "someShape").append("product", "someProduct").append("cost", "someCost")
-				.append("cost_total", "someCostTotal").append("currency_code", "someCode")
-				.append("lines", Collections.singletonList(new Document()));
-	}
-
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureInfoServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureInfoServiceTest.java
deleted file mode 100644
index 50aeb39..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureInfoServiceTest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.dto.aws.edge.EdgeInfoAws;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-public class AwsInfrastructureInfoServiceTest {
-
-	@Test
-	public void getSharedInfo() {
-		EdgeInfoAws edgeInfoAws = new EdgeInfoAws();
-		edgeInfoAws.setPublicIp("ip");
-		edgeInfoAws.setUserOwnBucketName("userOwnBucketName");
-		edgeInfoAws.setSharedBucketName("sharedBucketName");
-
-		Map<String, String> expectedMap = new HashMap<>();
-		expectedMap.put("edge_node_ip", "ip");
-		expectedMap.put("user_own_bicket_name", "userOwnBucketName");
-		expectedMap.put("shared_bucket_name", "sharedBucketName");
-
-		Map<String, String> actualMap = new AwsInfrastructureInfoService().getSharedInfo(edgeInfoAws);
-		assertEquals(expectedMap, actualMap);
-	}
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureTemplateServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureTemplateServiceTest.java
deleted file mode 100644
index 118e7e8..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsInfrastructureTemplateServiceTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.resources.dto.aws.AwsEmrConfiguration;
-import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
-import com.epam.dlab.dto.imagemetadata.ComputationalMetadataDTO;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import java.lang.reflect.Field;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
-
-@RunWith(MockitoJUnitRunner.class)
-public class AwsInfrastructureTemplateServiceTest {
-
-	@Mock
-	private SelfServiceApplicationConfiguration configuration;
-
-	@InjectMocks
-	private AwsInfrastructureTemplateService awsInfrastructureTemplateService;
-
-	@Test
-	public void getCloudFullComputationalTemplate() throws NoSuchFieldException, IllegalAccessException {
-		when(configuration.getMinEmrInstanceCount()).thenReturn(2);
-		when(configuration.getMaxEmrInstanceCount()).thenReturn(1000);
-		when(configuration.getMaxEmrSpotInstanceBidPct()).thenReturn(95);
-		when(configuration.getMinEmrSpotInstanceBidPct()).thenReturn(10);
-
-		AwsEmrConfiguration expectedAwsEmrConfiguration = AwsEmrConfiguration.builder()
-				.minEmrInstanceCount(2)
-				.maxEmrInstanceCount(1000)
-				.maxEmrSpotInstanceBidPct(95)
-				.minEmrSpotInstanceBidPct(10)
-				.build();
-
-		ComputationalMetadataDTO expectedComputationalMetadataDTO =
-				new ComputationalMetadataDTO("someImageName");
-
-		FullComputationalTemplate fullComputationalTemplate =
-				awsInfrastructureTemplateService.getCloudFullComputationalTemplate(expectedComputationalMetadataDTO);
-		assertNotNull(fullComputationalTemplate);
-
-		Field actualAwsEmrConfiguration =
-				fullComputationalTemplate.getClass().getDeclaredField("awsEmrConfiguration");
-		actualAwsEmrConfiguration.setAccessible(true);
-		assertEquals(expectedAwsEmrConfiguration, actualAwsEmrConfiguration.get(fullComputationalTemplate));
-
-		Field actualComputationalMetadataDTO = fullComputationalTemplate.getClass().getSuperclass()
-				.getDeclaredField("computationalMetadataDTO");
-		actualComputationalMetadataDTO.setAccessible(true);
-		assertEquals(expectedComputationalMetadataDTO, actualComputationalMetadataDTO.get(fullComputationalTemplate));
-
-		verify(configuration).getMinEmrInstanceCount();
-		verify(configuration).getMaxEmrInstanceCount();
-		verify(configuration).getMaxEmrSpotInstanceBidPct();
-		verify(configuration).getMinEmrSpotInstanceBidPct();
-		verifyNoMoreInteractions(configuration);
-	}
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureBillingServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureBillingServiceTest.java
deleted file mode 100644
index 1fafa87..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureBillingServiceTest.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.resources.dto.azure.AzureBillingFilter;
-import com.epam.dlab.exceptions.DlabException;
-import org.bson.Document;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import java.text.ParseException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
-@RunWith(MockitoJUnitRunner.class)
-public class AzureBillingServiceTest {
-
-	private UserInfo userInfo;
-	private AzureBillingFilter billingFilter;
-	private Document basicDocument;
-
-	@Mock
-	private BillingDAO<AzureBillingFilter> billingDAO;
-
-	@InjectMocks
-	private AzureBillingService azureBillingService;
-
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
-
-	@Before
-	public void setUp() {
-		userInfo = getUserInfo();
-		billingFilter = new AzureBillingFilter();
-		basicDocument = getBasicDocument();
-	}
-
-	@Test
-	public void getReportWithTheSameInstanceOfDocument() {
-		when(billingDAO.getReport(any(UserInfo.class), any(AzureBillingFilter.class))).thenReturn(new Document());
-
-		Document actualDocument = azureBillingService.getReport(userInfo, billingFilter);
-		assertEquals(new Document(), actualDocument);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void getReportWithException() {
-		doThrow(new RuntimeException()).when(billingDAO).getReport(any(UserInfo.class), any(AzureBillingFilter.class));
-
-		try {
-			azureBillingService.getReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot load billing report: null", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReport() {
-		when(billingDAO.getReport(any(UserInfo.class), any(AzureBillingFilter.class))).thenReturn(basicDocument);
-
-		byte[] result = azureBillingService.downloadReport(userInfo, billingFilter);
-		assertNotNull(result);
-		assertTrue(result.length > 0);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWithInappropriateDateFormatInDocument() {
-		basicDocument.put("from", "someDateStart");
-		when(billingDAO.getReport(any(UserInfo.class), any(AzureBillingFilter.class))).thenReturn(basicDocument);
-
-		try {
-			azureBillingService.downloadReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot prepare CSV file", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWhenDocumentHasNotAllRequiredFields() {
-		basicDocument.remove("lines");
-		when(billingDAO.getReport(any(UserInfo.class), any(AzureBillingFilter.class))).thenReturn(basicDocument);
-
-		expectedException.expect(NullPointerException.class);
-
-		azureBillingService.downloadReport(userInfo, billingFilter);
-	}
-
-	@Test
-	public void getReportFileName() {
-		String result = azureBillingService.getReportFileName(userInfo, billingFilter);
-		assertEquals("azure-billing-report.csv", result);
-	}
-
-	@Test
-	public void getFirstLine() throws ParseException {
-		String result = azureBillingService.getFirstLine(basicDocument);
-		assertEquals("Service base name: someSBN  Available reporting period from: Mar 21, 2018 " +
-				"to: Mar 22, 2018", result);
-	}
-
-	@Test
-	public void getFirstLineWithException() throws ParseException {
-		basicDocument.put("from", "someStartDate");
-
-		expectedException.expect(ParseException.class);
-
-		expectedException.expectMessage("Unparseable date: \"someStartDate\"");
-		azureBillingService.getFirstLine(basicDocument);
-	}
-
-	@Test
-	public void getHeadersList() {
-		List<String> expectedResult1 =
-				Arrays.asList("USER", "ENVIRONMENT NAME", "RESOURCE TYPE", "INSTANCE SIZE", "CATEGORY", "SERVICE " +
-						"CHARGES");
-		List<String> expectedResult2 = expectedResult1.subList(1, expectedResult1.size());
-
-		List<String> actualResult1 = azureBillingService.getHeadersList(true);
-		assertEquals(expectedResult1, actualResult1);
-
-		List<String> actualResult2 = azureBillingService.getHeadersList(false);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getLine() {
-		String expectedResult1 = "someUser,someId,someResType,someSize,someMeterCategory,someCost someCode\n";
-		String actualResult1 = azureBillingService.getLine(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		basicDocument.remove("user");
-		String expectedResult2 = "someId,someResType,someSize,someMeterCategory,someCost someCode\n";
-		String actualResult2 = azureBillingService.getLine(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getTotal() {
-		String expectedResult1 = ",,,,,Total: someCost someCode\n";
-		String actualResult1 = azureBillingService.getTotal(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		String expectedResult2 = ",,,,Total: someCost someCode\n";
-		String actualResult2 = azureBillingService.getTotal(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	private UserInfo getUserInfo() {
-		return new UserInfo("user", "token");
-	}
-
-	private Document getBasicDocument() {
-		return new Document().append("service_base_name", "someSBN").append("user", "someUser")
-				.append("dlabId", "someId").append("resourceType", "someResType")
-				.append("from", "2018-03-21").append("size", "someSize")
-				.append("to", "2018-03-22").append("full_report", false)
-				.append("meterCategory", "someMeterCategory").append("costString", "someCost")
-				.append("currencyCode", "someCode").append("lines", Collections.singletonList(new Document()));
-	}
-
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureInfoServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureInfoServiceTest.java
deleted file mode 100644
index 37f5e46..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureInfoServiceTest.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.dto.azure.edge.EdgeInfoAzure;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-public class AzureInfrastructureInfoServiceTest {
-
-	@Test
-	public void getSharedInfo() {
-		EdgeInfoAzure edgeInfoAzure = new EdgeInfoAzure();
-		edgeInfoAzure.setPublicIp("ip");
-		edgeInfoAzure.setUserContainerName("userContainerName");
-		edgeInfoAzure.setSharedContainerName("sharedContainerName");
-		edgeInfoAzure.setUserStorageAccountName("userStorageAccountName");
-		edgeInfoAzure.setSharedStorageAccountName("sharedStorageAccountName");
-		edgeInfoAzure.setDataLakeName("datalakeName");
-		edgeInfoAzure.setDataLakeDirectoryName("datalakeUserDirectoryName");
-		edgeInfoAzure.setDataLakeSharedDirectoryName("datalakeSharedDirectoryName");
-
-		Map<String, String> expectedMap = new HashMap<>();
-		expectedMap.put("edge_node_ip", "ip");
-		expectedMap.put("user_container_name", "userContainerName");
-		expectedMap.put("shared_container_name", "sharedContainerName");
-		expectedMap.put("user_storage_account_name", "userStorageAccountName");
-		expectedMap.put("shared_storage_account_name", "sharedStorageAccountName");
-		expectedMap.put("datalake_name", "datalakeName");
-		expectedMap.put("datalake_user_directory_name", "datalakeUserDirectoryName");
-		expectedMap.put("datalake_shared_directory_name", "datalakeSharedDirectoryName");
-
-		Map<String, String> actualMap = new AzureInfrastructureInfoService().getSharedInfo(edgeInfoAzure);
-		assertEquals(expectedMap, actualMap);
-	}
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureTemplateServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureTemplateServiceTest.java
deleted file mode 100644
index 0da7503..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureInfrastructureTemplateServiceTest.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.dto.imagemetadata.ComputationalMetadataDTO;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-public class AzureInfrastructureTemplateServiceTest {
-
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
-
-	@Test
-	public void getCloudFullComputationalTemplate() {
-		expectedException.expect(UnsupportedOperationException.class);
-		expectedException.expectMessage("Operation is not supported currently");
-		new AzureInfrastructureTemplateService().getCloudFullComputationalTemplate(new ComputationalMetadataDTO());
-	}
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureInfoServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureInfoServiceTest.java
deleted file mode 100644
index b57daf3..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureInfoServiceTest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.gcp;
-
-import com.epam.dlab.dto.gcp.edge.EdgeInfoGcp;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-public class GcpInfrastructureInfoServiceTest {
-
-	@Test
-	public void getSharedInfo() {
-		EdgeInfoGcp edgeInfoGcp = new EdgeInfoGcp();
-		edgeInfoGcp.setPublicIp("ip");
-		edgeInfoGcp.setUserOwnBucketName("userOwnBucketName");
-		edgeInfoGcp.setSharedBucketName("sharedBucketName");
-
-		Map<String, String> expectedMap = new HashMap<>();
-		expectedMap.put("edge_node_ip", "ip");
-		expectedMap.put("user_own_bucket_name", "userOwnBucketName");
-		expectedMap.put("shared_bucket_name", "sharedBucketName");
-
-		Map<String, String> actualMap = new GcpInfrastructureInfoService().getSharedInfo(edgeInfoGcp);
-		assertEquals(expectedMap, actualMap);
-	}
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureTemplateServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureTemplateServiceTest.java
deleted file mode 100644
index 2010b76..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/gcp/GcpInfrastructureTemplateServiceTest.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.gcp;
-
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.resources.dto.gcp.GcpDataprocConfiguration;
-import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
-import com.epam.dlab.dto.imagemetadata.ComputationalMetadataDTO;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import java.lang.reflect.Field;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
-
-@RunWith(MockitoJUnitRunner.class)
-public class GcpInfrastructureTemplateServiceTest {
-
-	@Mock
-	private SelfServiceApplicationConfiguration configuration;
-
-	@InjectMocks
-	private GcpInfrastructureTemplateService gcpInfrastructureTemplateService;
-
-	@Test
-	public void getCloudFullComputationalTemplate() throws NoSuchFieldException, IllegalAccessException {
-		when(configuration.getMinInstanceCount()).thenReturn(2);
-		when(configuration.getMaxInstanceCount()).thenReturn(100);
-		when(configuration.getMinDataprocPreemptibleCount()).thenReturn(10);
-
-		GcpDataprocConfiguration expectedGcpDataprocConfiguration = GcpDataprocConfiguration.builder()
-				.minInstanceCount(2)
-				.maxInstanceCount(100)
-				.minDataprocPreemptibleInstanceCount(10)
-				.build();
-
-		ComputationalMetadataDTO expectedComputationalMetadataDTO =
-				new ComputationalMetadataDTO("someImageName");
-
-		FullComputationalTemplate fullComputationalTemplate =
-				gcpInfrastructureTemplateService.getCloudFullComputationalTemplate(expectedComputationalMetadataDTO);
-		assertNotNull(fullComputationalTemplate);
-
-		Field actualGcpDataprocConfiguration =
-				fullComputationalTemplate.getClass().getDeclaredField("gcpDataprocConfiguration");
-		actualGcpDataprocConfiguration.setAccessible(true);
-		assertEquals(expectedGcpDataprocConfiguration, actualGcpDataprocConfiguration.get(fullComputationalTemplate));
-
-		Field actualComputationalMetadataDTO = fullComputationalTemplate.getClass().getSuperclass()
-				.getDeclaredField("computationalMetadataDTO");
-		actualComputationalMetadataDTO.setAccessible(true);
-		assertEquals(expectedComputationalMetadataDTO, actualComputationalMetadataDTO.get(fullComputationalTemplate));
-
-		verify(configuration).getMinInstanceCount();
-		verify(configuration).getMaxInstanceCount();
-		verify(configuration).getMinDataprocPreemptibleCount();
-		verifyNoMoreInteractions(configuration);
-	}
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/AccessKeyServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/AccessKeyServiceImplTest.java
deleted file mode 100644
index c68a3d0..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/AccessKeyServiceImplTest.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.impl;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.service.ExploratoryService;
-import com.epam.dlab.backendapi.service.ReuploadKeyService;
-import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.dto.base.keyload.UploadFile;
-import com.epam.dlab.dto.keyload.KeyLoadStatus;
-import com.epam.dlab.dto.keyload.UserKeyDTO;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
-import org.apache.commons.lang3.StringUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
-@RunWith(MockitoJUnitRunner.class)
-public class AccessKeyServiceImplTest {
-
-	private final String USER = "test";
-	private final String TOKEN = "token";
-
-	private UserInfo userInfo;
-
-	@Mock
-	private KeyDAO keyDAO;
-	@Mock
-	private RESTService provisioningService;
-	@Mock
-	private RequestBuilder requestBuilder;
-	@Mock
-	private RequestId requestId;
-	@Mock
-	private ExploratoryService exploratoryService;
-	@Mock
-	private SelfServiceApplicationConfiguration configuration;
-	@Mock
-	private ReuploadKeyService reuploadKeyService;
-
-	@InjectMocks
-	private AccessKeyServiceImpl accessKeyService;
-
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
-
-	@Before
-	public void setUp() {
-		userInfo = getUserInfo();
-	}
-
-	@Test
-	public void getUserKeyStatus() {
-		when(keyDAO.findKeyStatus(anyString())).thenReturn(KeyLoadStatus.SUCCESS);
-
-		KeyLoadStatus keyLoadStatus = accessKeyService.getUserKeyStatus(USER);
-		assertEquals(KeyLoadStatus.SUCCESS, keyLoadStatus);
-
-		verify(keyDAO).findKeyStatus(USER);
-		verifyNoMoreInteractions(keyDAO);
-	}
-
-	@Test
-	public void getUserKeyStatusWithException() {
-		doThrow(new DlabException("Some message")).when(keyDAO).findKeyStatus(anyString());
-
-		KeyLoadStatus keyLoadStatus = accessKeyService.getUserKeyStatus(USER);
-		assertEquals(KeyLoadStatus.ERROR, keyLoadStatus);
-
-		verify(keyDAO).findKeyStatus(USER);
-		verifyNoMoreInteractions(keyDAO);
-	}
-
-	@Test
-	public void uploadKey() {
-		doNothing().when(keyDAO).upsertKey(anyString(), anyString(), anyBoolean());
-		doNothing().when(exploratoryService).updateExploratoriesReuploadKeyFlag(anyString(), anyBoolean(),
-				anyVararg());
-
-		UploadFile uploadFile = mock(UploadFile.class);
-		when(requestBuilder.newEdgeKeyUpload(any(UserInfo.class), anyString())).thenReturn(uploadFile);
-
-		String expectedUuid = "someUuid";
-		when(provisioningService.post(anyString(), anyString(), any(UploadFile.class), any())).
-				thenReturn(expectedUuid);
-		when(requestId.put(anyString(), anyString())).thenReturn(expectedUuid);
-
-		String keyContent = "keyContent";
-		String actualUuid = accessKeyService.uploadKey(userInfo, keyContent, true);
-		assertNotNull(actualUuid);
-		assertEquals(expectedUuid, actualUuid);
-
-		verify(keyDAO).upsertKey(USER, keyContent, true);
-		verifyZeroInteractions(exploratoryService);
-		verify(requestBuilder).newEdgeKeyUpload(userInfo, keyContent);
-		verify(provisioningService).post("infrastructure/edge/create", TOKEN, uploadFile, String.class);
-		verify(requestId).put(USER, expectedUuid);
-		verifyNoMoreInteractions(keyDAO, requestBuilder, provisioningService, requestId);
-	}
-
-
-	@Test
-	public void uploadKeyWithException() {
-		doNothing().when(keyDAO).upsertKey(anyString(), anyString(), anyBoolean());
-		doNothing().when(exploratoryService).updateExploratoriesReuploadKeyFlag(anyString(), anyBoolean(), anyVararg());
-		doThrow(new RuntimeException()).when(requestBuilder).newEdgeKeyUpload(any(UserInfo.class), anyString());
-
-		expectedException.expect(RuntimeException.class);
-
-		doNothing().when(keyDAO).deleteKey(anyString());
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not upload the key and create EDGE node: ");
-
-		accessKeyService.uploadKey(userInfo, "someKeyContent", true);
-	}
-
-	@Test
-	public void reUploadKey() {
-		doNothing().when(keyDAO).upsertKey(anyString(), anyString(), anyBoolean());
-		when(reuploadKeyService.reuploadKey(any(UserInfo.class), anyString())).thenReturn("someString");
-
-		String expectedString = "someString";
-		String keyContent = "keyContent";
-		String actualString = accessKeyService.uploadKey(userInfo, keyContent, false);
-		assertNotNull(actualString);
-		assertEquals(expectedString, actualString);
-
-		verify(keyDAO).upsertKey(USER, keyContent, false);
-		verify(reuploadKeyService).reuploadKey(userInfo, keyContent);
-		verifyNoMoreInteractions(keyDAO, reuploadKeyService);
-	}
-
-	@Test
-	public void reUploadKeyWithException() {
-		doNothing().when(keyDAO).upsertKey(anyString(), anyString(), anyBoolean());
-		doThrow(new RuntimeException()).when(reuploadKeyService).reuploadKey(any(UserInfo.class), anyString());
-
-		expectedException.expect(RuntimeException.class);
-
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not reupload the key. Previous key has been deleted:");
-
-		accessKeyService.uploadKey(userInfo, "someKeyContent", false);
-	}
-
-	@Test
-	public void recoverEdge() {
-		EdgeInfo edgeInfo = new EdgeInfo();
-		edgeInfo.setId("someId");
-		edgeInfo.setEdgeStatus("failed");
-		when(keyDAO.getEdgeInfo(anyString())).thenReturn(edgeInfo);
-
-		UserKeyDTO userKeyDTO = new UserKeyDTO();
-		userKeyDTO.withStatus("someStatus");
-		userKeyDTO.withContent("someContent");
-		when(keyDAO.fetchKey(anyString(), any(KeyLoadStatus.class))).thenReturn(userKeyDTO);
-
-		edgeInfo.setEdgeStatus("terminated");
-		edgeInfo.setInstanceId(null);
-
-		doNothing().when(keyDAO).updateEdgeInfo(anyString(), any(EdgeInfo.class));
-
-		UploadFile uploadFile = mock(UploadFile.class);
-		when(requestBuilder.newEdgeKeyUpload(any(UserInfo.class), anyString())).thenReturn(uploadFile);
-
-		String expectedUuid = "someUuid";
-		when(provisioningService.post(anyString(), anyString(), any(UploadFile.class), any()))
-				.thenReturn(expectedUuid);
-		when(requestId.put(anyString(), anyString())).thenReturn(expectedUuid);
-
-		String actualUuid = accessKeyService.recoverEdge(userInfo);
-		assertNotNull(actualUuid);
-		assertEquals(expectedUuid, actualUuid);
-
-		verify(keyDAO).getEdgeInfo(USER);
-		verify(keyDAO).fetchKey(USER, KeyLoadStatus.SUCCESS);
-		verify(keyDAO).updateEdgeInfo(USER, edgeInfo);
-
-		verify(requestBuilder).newEdgeKeyUpload(userInfo, userKeyDTO.getContent());
-		verify(provisioningService).post("infrastructure/edge/create", TOKEN, uploadFile, String.class);
-		verify(requestId).put(USER, expectedUuid);
-		verifyNoMoreInteractions(keyDAO, requestBuilder, provisioningService, requestId);
-	}
-
-	@Test
-	public void recoverEdgeWithExceptionInGetEdgeInfoMethod() {
-		EdgeInfo edgeInfo = new EdgeInfo();
-		edgeInfo.setId("someId");
-		edgeInfo.setEdgeStatus("running");
-		when(keyDAO.getEdgeInfo(anyString())).thenReturn(edgeInfo);
-
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not create EDGE node because the status of instance is running");
-
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not upload the key and create EDGE node:");
-
-		accessKeyService.recoverEdge(userInfo);
-
-		verify(keyDAO).getEdgeInfo(USER);
-		verify(keyDAO).updateEdgeStatus(USER, UserInstanceStatus.FAILED.toString());
-		verifyNoMoreInteractions(keyDAO);
-		verifyZeroInteractions(requestBuilder, provisioningService, requestId);
-	}
-
-	@Test
-	public void recoverEdgeWithExceptionInFetchKeyMethod() {
-		EdgeInfo edgeInfo = new EdgeInfo();
-		edgeInfo.setId("someId");
-		edgeInfo.setEdgeStatus("failed");
-		when(keyDAO.getEdgeInfo(anyString())).thenReturn(edgeInfo);
-
-		UserKeyDTO userKeyDTO = new UserKeyDTO();
-		userKeyDTO.withStatus("someStatus");
-		userKeyDTO.withContent("someContent");
-		doThrow(new DlabException(String.format("Key of user %s with status %s not found", USER,
-				KeyLoadStatus.SUCCESS))).when(keyDAO).fetchKey(anyString(), eq(KeyLoadStatus.SUCCESS));
-
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not upload the key and create EDGE node: ");
-
-		accessKeyService.recoverEdge(userInfo);
-
-		verify(keyDAO).getEdgeInfo(USER);
-		verify(keyDAO).fetchKey(USER, KeyLoadStatus.SUCCESS);
-		verify(keyDAO).updateEdgeStatus(USER, UserInstanceStatus.FAILED.toString());
-		verifyNoMoreInteractions(keyDAO);
-		verifyZeroInteractions(requestBuilder, provisioningService, requestId);
-	}
-
-	@Test
-	public void generateKey() {
-		doNothing().when(keyDAO).upsertKey(anyString(), anyString(), anyBoolean());
-
-		UploadFile uploadFile = mock(UploadFile.class);
-		when(requestBuilder.newEdgeKeyUpload(any(UserInfo.class), anyString())).thenReturn(uploadFile);
-
-		String someUuid = "someUuid";
-		when(configuration.getPrivateKeySize()).thenReturn(2048);
-		when(provisioningService.post(anyString(), anyString(), any(UploadFile.class), any())).thenReturn(someUuid);
-		when(requestId.put(anyString(), anyString())).thenReturn(someUuid);
-
-		String actualPrivateKey = accessKeyService.generateKey(userInfo, true);
-		assertTrue(StringUtils.isNotEmpty(actualPrivateKey));
-
-		verify(keyDAO).upsertKey(eq(USER), anyString(), eq(true));
-		verify(requestBuilder).newEdgeKeyUpload(refEq(userInfo), anyString());
-		verify(provisioningService).post("infrastructure/edge/create", TOKEN, uploadFile, String.class);
-		verify(requestId).put(USER, someUuid);
-		verifyNoMoreInteractions(keyDAO, requestBuilder, provisioningService, requestId);
-	}
-
-	@Test
-	public void generateKeyWithException() {
-		doNothing().when(keyDAO).upsertKey(anyString(), anyString(), anyBoolean());
-		when(configuration.getPrivateKeySize()).thenReturn(2048);
-		doThrow(new RuntimeException()).when(requestBuilder).newEdgeKeyUpload(any(UserInfo.class), anyString());
-		doNothing().when(keyDAO).deleteKey(anyString());
-
-		try {
-			accessKeyService.generateKey(userInfo, true);
-		} catch (DlabException e) {
-			assertEquals("Could not upload the key and create EDGE node: ", e.getMessage());
-		}
-
-		verify(keyDAO).upsertKey(eq(USER), anyString(), eq(true));
-		verify(requestBuilder).newEdgeKeyUpload(refEq(userInfo), anyString());
-		verify(keyDAO).deleteKey(USER);
-		verifyNoMoreInteractions(keyDAO, requestBuilder);
-	}
-
-	private UserInfo getUserInfo() {
-		return new UserInfo(USER, TOKEN);
-	}
-
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
index 24e0791..74fc7f0 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
@@ -24,19 +24,29 @@
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.resources.dto.ComputationalCreateFormDTO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
 import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.TagService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.SchedulerJobDTO;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
-import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
-import com.epam.dlab.dto.computational.*;
+import com.epam.dlab.dto.base.edge.EdgeInfo;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStartDTO;
+import com.epam.dlab.dto.computational.ComputationalStatusDTO;
+import com.epam.dlab.dto.computational.ComputationalStopDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.SparkStandaloneClusterResource;
+import com.epam.dlab.dto.computational.UserComputationalResource;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.epam.dlab.rest.client.RESTService;
@@ -57,40 +67,60 @@
 import java.util.List;
 import java.util.Optional;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
 import static java.util.Collections.singletonList;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 
 @RunWith(MockitoJUnitRunner.class)
 public class ComputationalServiceImplTest {
 
-	private static final long MAX_INACTIVITY = 10L;
-	private static final String DOCKER_DLAB_DATAENGINE = "docker.dlab-dataengine";
-	private static final String DOCKER_DLAB_DATAENGINE_SERVICE = "docker.dlab-dataengine-service";
-	private static final String COMP_ID = "compId";
-	private final String USER = "test";
-	private final String TOKEN = "token";
-	private final String EXPLORATORY_NAME = "expName";
-	private final String COMP_NAME = "compName";
-	private final String UUID = "1234-56789765-4321";
-	private final LocalDateTime LAST_ACTIVITY = LocalDateTime.now().minusMinutes(MAX_INACTIVITY);
+    private static final long MAX_INACTIVITY = 10L;
+    private static final String DOCKER_DLAB_DATAENGINE = "docker.dlab-dataengine";
+    private static final String DOCKER_DLAB_DATAENGINE_SERVICE = "docker.dlab-dataengine-service";
+    private static final String COMP_ID = "compId";
+    private final String USER = "test";
+    private final String TOKEN = "token";
+    private final String EXPLORATORY_NAME = "expName";
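+    // Scopes every project-aware DAO and service call in these tests.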
+    private final String PROJECT = "project";
+    private final String COMP_NAME = "compName";
+    private final String UUID = "1234-56789765-4321";
+    private final LocalDateTime LAST_ACTIVITY = LocalDateTime.now().minusMinutes(MAX_INACTIVITY);
 
-	private UserInfo userInfo;
-	private List<ComputationalCreateFormDTO> formList;
-	private UserInstanceDTO userInstance;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusTerminating;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusFailed;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusStopping;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusStarting;
-	private SparkStandaloneClusterResource sparkClusterResource;
-	private UserComputationalResource ucResource;
+    private UserInfo userInfo;
+    private List<ComputationalCreateFormDTO> formList;
+    private UserInstanceDTO userInstance;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusTerminating;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusFailed;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusStopping;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusStarting;
+    private SparkStandaloneClusterResource sparkClusterResource;
+    private UserComputationalResource ucResource;
 
-	@Mock
-	private ExploratoryDAO exploratoryDAO;
-	@Mock
-	private ComputationalDAO computationalDAO;
+    @Mock
+    private ProjectService projectService;
+    @Mock
+    private ExploratoryDAO exploratoryDAO;
+    @Mock
+    private ComputationalDAO computationalDAO;
 	@Mock
 	private RESTService provisioningService;
 	@Mock
@@ -125,141 +155,148 @@
 
 	@Test
 	public void createSparkCluster() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
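+        // The DAO and builder signatures are now project-aware: project precedes the
+        // exploratory name in fetchExploratoryFields and follows it in addComputational.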
+        ProjectDTO projectDTO = getProjectDTO();
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ComputationalBase compBaseMocked = mock(ComputationalBase.class);
-		when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(SparkStandaloneClusterCreateForm.class))).thenReturn(compBaseMocked);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        ComputationalBase compBaseMocked = mock(ComputationalBase.class);
+        when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class)))
+                .thenReturn(compBaseMocked);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
-		boolean creationResult =
-				computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
-		assertTrue(creationResult);
+        SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+        boolean creationResult =
+                computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+        assertTrue(creationResult);
 
-		verify(computationalDAO)
-				.addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(sparkClusterResource));
+        verify(projectService).get(PROJECT);
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(sparkClusterResource));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newComputationalCreate(
-				refEq(userInfo), refEq(userInstance), refEq(sparkClusterCreateForm));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
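+        // refEq compares fields reflectively, so the freshly built endpointDTO() matches the stubbed endpoint.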
+        verify(requestBuilder).newComputationalCreate(
+                refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(sparkClusterCreateForm), refEq(endpointDTO()));
 
-		verify(provisioningService)
-				.post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK, TOKEN, compBaseMocked,
-						String.class);
+        verify(provisioningService)
+                .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK, TOKEN, compBaseMocked,
+                        String.class);
 
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(configuration, computationalDAO, requestBuilder, provisioningService, requestId);
-	}
-	@Test
-	public void createSparkClusterWhenResourceAlreadyExists() {
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(false);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(projectService, configuration, computationalDAO, requestBuilder, provisioningService, requestId);
+    }
+
+    @Test
+    public void createSparkClusterWhenResourceAlreadyExists() {
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(false);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
 
-		boolean creationResult =
-				computationalService.createSparkCluster(userInfo, (SparkStandaloneClusterCreateForm) formList.get(0),
-						"");
-		assertFalse(creationResult);
+        boolean creationResult = computationalService.createSparkCluster(userInfo,
+                (SparkStandaloneClusterCreateForm) formList.get(0), PROJECT);
+        assertFalse(creationResult);
 
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(sparkClusterResource));
-		verifyNoMoreInteractions(configuration, computationalDAO);
-	}
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(sparkClusterResource));
+        verifyNoMoreInteractions(configuration, computationalDAO);
+    }
 
 	@Test
 	public void createSparkClusterWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(true);
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(true);
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
-		try {
-			computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
-		} catch (ResourceNotFoundException e) {
-			assertEquals("Exploratory for user with name not found", e.getMessage());
-		}
+        SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+        try {
+            computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+        } catch (ResourceNotFoundException e) {
+            assertEquals("Exploratory for user with name not found", e.getMessage());
+        }
 
-		verify(computationalDAO, never()).addComputational(USER, EXPLORATORY_NAME, sparkClusterResource);
-		verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
-				"self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(configuration, computationalDAO, exploratoryDAO);
-	}
+        verify(computationalDAO, never()).addComputational(USER, EXPLORATORY_NAME, PROJECT, sparkClusterResource);
+        verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
+                "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(configuration, computationalDAO, exploratoryDAO);
+    }
 
 	@Test
 	public void createSparkClusterWhenMethodNewComputationalCreateThrowsException() {
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        ProjectDTO projectDTO = getProjectDTO();
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(SparkStandaloneClusterCreateForm.class));
+        doThrow(new DlabException("Cannot create instance of resource class "))
+                .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class));
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
-		try {
-			computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
-		} catch (DlabException e) {
-			assertEquals("Cannot create instance of resource class ", e.getMessage());
-		}
-		verify(computationalDAO).addComputational(USER, EXPLORATORY_NAME, sparkClusterResource);
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newComputationalCreate(userInfo, userInstance, sparkClusterCreateForm);
-		verifyNoMoreInteractions(configuration, computationalDAO, exploratoryDAO, requestBuilder);
-	}
+        SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+        try {
+            computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Cannot create instance of resource class ", e.getMessage());
+        }
+        verify(projectService).get(PROJECT);
+        verify(computationalDAO).addComputational(USER, EXPLORATORY_NAME, PROJECT, sparkClusterResource);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(requestBuilder).newComputationalCreate(userInfo, projectDTO, userInstance, sparkClusterCreateForm, endpointDTO());
+        verifyNoMoreInteractions(projectService, configuration, computationalDAO, exploratoryDAO, requestBuilder);
+    }
 
 	@Test
 	public void terminateComputationalEnvironment() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		String explId = "explId";
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		String compId = "compId";
-		UserComputationalResource ucResource = new UserComputationalResource();
-		ucResource.setComputationalName(COMP_NAME);
-		ucResource.setImageName("dataengine-service");
-		ucResource.setComputationalId(compId);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+        String compId = "compId";
+        UserComputationalResource ucResource = new UserComputationalResource();
+        ucResource.setComputationalName(COMP_NAME);
+        ucResource.setImageName("dataengine-service");
+        ucResource.setComputationalId(compId);
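+        // fetchComputationalFields is stubbed in its new four-argument form (user, project, exploratory, compName).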
+        when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
 
-		ComputationalTerminateDTO ctDto = new ComputationalTerminateDTO();
-		ctDto.setComputationalName(COMP_NAME);
-		ctDto.setExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class))).thenReturn(ctDto);
+        ComputationalTerminateDTO ctDto = new ComputationalTerminateDTO();
+        ctDto.setComputationalName(COMP_NAME);
+        ctDto.setExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), any(EndpointDTO.class))).thenReturn(ctDto);
 
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalTerminateDTO.class), any()))
-				.thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalTerminateDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
+        computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
 
-		verify(requestBuilder).newComputationalTerminate(userInfo, userInstance,ucResource);
+        verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
 
-		verify(provisioningService).post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC, TOKEN, ctDto,
-				String.class);
+        verify(provisioningService).post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC, TOKEN, ctDto,
+                String.class);
 
-		verify(requestId).put(USER, UUID);
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
-	}
+        verify(requestId).put(USER, UUID);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
+    }
 
 	@Test
 	public void terminateComputationalEnvironmentWhenMethodUpdateComputationalStatusThrowsException() {
@@ -270,11 +307,11 @@
 		when(computationalDAO.updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self")))
 				.thenReturn(mock(UpdateResult.class));
 
-		try {
-			computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
-		} catch (DlabException e) {
-			assertEquals("Could not update computational resource status", e.getMessage());
-		}
+        try {
+            computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        } catch (DlabException e) {
+            assertEquals("Could not update computational resource status", e.getMessage());
+        }
 
 		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
 		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
@@ -283,391 +320,384 @@
 
 	@Test
 	public void terminateComputationalEnvironmentWhenMethodFetchComputationalFieldsThrowsException() {
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		String explId = "explId";
-		when(exploratoryDAO.fetchExploratoryId(anyString(), anyString())).thenReturn(explId);
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		doThrow(new DlabException("Computational resource for user with exploratory name not found."))
-				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        doThrow(new DlabException("Computational resource for user with exploratory name not found."))
+                .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
 
-		try {
-			computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
-		} catch (DlabException e) {
-			assertEquals("Computational resource for user with exploratory name not found.", e.getMessage());
-		}
+        try {
+            computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        } catch (DlabException e) {
+            assertEquals("Computational resource for user with exploratory name not found.", e.getMessage());
+        }
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
-	}
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
+    }
 
 	@Test
 	public void terminateComputationalEnvironmentWhenMethodNewComputationalTerminateThrowsException() {
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		String explId = "explId";
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		String compId = "compId";
-		UserComputationalResource ucResource = new UserComputationalResource();
-		ucResource.setComputationalName(COMP_NAME);
-		ucResource.setImageName("dataengine-service");
-		ucResource.setComputationalId(compId);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+        String compId = "compId";
+        UserComputationalResource ucResource = new UserComputationalResource();
+        ucResource.setComputationalName(COMP_NAME);
+        ucResource.setImageName("dataengine-service");
+        ucResource.setComputationalId(compId);
+        when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
 
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class));
+        doThrow(new DlabException("Cannot create instance of resource class "))
+                .when(requestBuilder).newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), any(EndpointDTO.class));
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		try {
-			computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
-		} catch (DlabException e) {
-			assertEquals("Cannot create instance of resource class ", e.getMessage());
-		}
+        try {
+            computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        } catch (DlabException e) {
+            assertEquals("Cannot create instance of resource class ", e.getMessage());
+        }
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource);
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder);
-	}
+        verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder);
+    }
 
 	@Test
 	public void createDataEngineService() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        ProjectDTO projectDTO = getProjectDTO();
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+                .thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ComputationalBase compBaseMocked = mock(ComputationalBase.class);
-		when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(ComputationalCreateFormDTO.class))).thenReturn(compBaseMocked);
+        ComputationalBase compBaseMocked = mock(ComputationalBase.class);
+        when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class)))
+                .thenReturn(compBaseMocked);
 
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		boolean creationResult =
-				computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, "");
-		assertTrue(creationResult);
+        boolean creationResult =
+                computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, PROJECT);
+        assertTrue(creationResult);
 
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
+        verify(projectService).get(PROJECT);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
 
-		verify(requestBuilder).newComputationalCreate(
-				refEq(userInfo), refEq(userInstance), any(ComputationalCreateFormDTO.class));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		verify(provisioningService)
-				.post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC, TOKEN,
-						compBaseMocked, String.class);
+        verify(requestBuilder).newComputationalCreate(
+                refEq(userInfo), refEq(projectDTO), refEq(userInstance), any(ComputationalCreateFormDTO.class), refEq(endpointDTO()));
 
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
-	}
+        verify(provisioningService)
+                .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC, TOKEN,
+                        compBaseMocked, String.class);
+
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
+    }
 
 	@Test
 	public void createDataEngineServiceWhenComputationalResourceNotAdded() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(false);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+                .thenReturn(false);
 
-		boolean creationResult = computationalService.createDataEngineService(userInfo, formList.get(1), ucResource,
-				"");
-		assertFalse(creationResult);
+        boolean creationResult = computationalService.createDataEngineService(userInfo, formList.get(1), ucResource,
+                PROJECT);
+        assertFalse(creationResult);
 
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
+        verifyNoMoreInteractions(computationalDAO);
+    }
 
 	@Test
 	public void createDataEngineServiceWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(true);
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+                .thenReturn(true);
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		try {
-			computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, "");
-		} catch (DlabException e) {
-			assertEquals("Exploratory for user with name not found", e.getMessage());
-		}
+        try {
+            computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Exploratory for user with name not found", e.getMessage());
+        }
 
-		verify(computationalDAO, never())
-				.addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
+        verify(computationalDAO, never())
+                .addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
-				"self"));
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
-	}
+        verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
+                "self"));
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
+    }
 
 	@Test
 	public void createDataEngineServiceWhenMethodNewComputationalCreateThrowsException() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        ProjectDTO projectDTO = getProjectDTO();
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+                .thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(ComputationalCreateFormDTO.class));
+        doThrow(new DlabException("Cannot create instance of resource class "))
+                .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class));
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		ComputationalCreateFormDTO computationalCreateFormDTO = formList.get(1);
-		try {
-			computationalService.createDataEngineService(userInfo, computationalCreateFormDTO, ucResource, "");
-		} catch (DlabException e) {
-			assertEquals("Could not send request for creation the computational resource compName: " +
-					"Cannot create instance of resource class ", e.getMessage());
-		}
+        ComputationalCreateFormDTO computationalCreateFormDTO = formList.get(1);
+        try {
+            computationalService.createDataEngineService(userInfo, computationalCreateFormDTO, ucResource, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Could not send request for creation the computational resource compName: " +
+                    "Cannot create instance of resource class ", e.getMessage());
+        }
 
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newComputationalCreate(
-				refEq(userInfo), refEq(userInstance), refEq(computationalCreateFormDTO));
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verify(projectService).get(PROJECT);
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(requestBuilder).newComputationalCreate(
+                refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(computationalCreateFormDTO), refEq(endpointDTO()));
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
 
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder);
-	}
+        verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder);
+    }
 
 	@Test
 	public void stopSparkCluster() {
-		final UserInstanceDTO exploratory = getUserInstanceDto();
-		exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE)));
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        final UserInstanceDTO exploratory = getUserInstanceDto();
+        exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE)));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		ComputationalStopDTO computationalStopDTO = new ComputationalStopDTO();
-		when(requestBuilder.newComputationalStop(any(UserInfo.class), any(UserInstanceDTO.class), anyString()))
-				.thenReturn(computationalStopDTO);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
-				.thenReturn("someUuid");
-		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
+        ComputationalStopDTO computationalStopDTO = new ComputationalStopDTO();
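+        // newComputationalStop now also receives the target EndpointDTO.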
+        when(requestBuilder.newComputationalStop(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
+                any(EndpointDTO.class))).thenReturn(computationalStopDTO);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
+                .thenReturn("someUuid");
+        when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 
-		computationalService.stopSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME);
+        computationalService.stopSparkCluster(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStopping, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verify(requestBuilder).newComputationalStop(refEq(userInfo), refEq(exploratory), eq(COMP_NAME));
-		verify(provisioningService)
-				.post(eq(endpointDTO().getUrl() + "computational/stop/spark"), eq(TOKEN), refEq(computationalStopDTO),
-						eq(String.class));
-		verify(requestId).put(USER, "someUuid");
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
-				provisioningService, requestId);
-	}
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStopping, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verify(requestBuilder).newComputationalStop(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
+        verify(provisioningService)
+                .post(eq(endpointDTO().getUrl() + "computational/stop/spark"), eq(TOKEN), refEq(computationalStopDTO),
+                        eq(String.class));
+        verify(requestId).put(USER, "someUuid");
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
+                provisioningService, requestId);
+    }
 
 	@Test
 	public void stopSparkClusterWhenDataengineTypeIsAnother() {
-		final UserInstanceDTO exploratory = getUserInstanceDto();
-		exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE_SERVICE)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
-		expectedException.expect(IllegalStateException.class);
-		expectedException.expectMessage("There is no running dataengine compName for exploratory expName");
+        final UserInstanceDTO exploratory = getUserInstanceDto();
+        exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE_SERVICE)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
+        expectedException.expect(IllegalStateException.class);
+        expectedException.expectMessage("There is no running dataengine compName for exploratory expName");
 
-		computationalService.stopSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME);
-	}
+        computationalService.stopSparkCluster(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+    }
 
 	@Test
 	public void startSparkCluster() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		final UserInstanceDTO exploratory = getUserInstanceDto();
-		exploratory.setResources(singletonList(getUserComputationalResource(STOPPED, DOCKER_DLAB_DATAENGINE)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        final UserInstanceDTO exploratory = getUserInstanceDto();
+        exploratory.setResources(singletonList(getUserComputationalResource(STOPPED, DOCKER_DLAB_DATAENGINE)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		ComputationalStartDTO computationalStartDTO = new ComputationalStartDTO();
-		when(requestBuilder.newComputationalStart(any(UserInfo.class), any(UserInstanceDTO.class), anyString()))
-				.thenReturn(computationalStartDTO);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
-				.thenReturn("someUuid");
-		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
+        ComputationalStartDTO computationalStartDTO = new ComputationalStartDTO();
+        when(requestBuilder.newComputationalStart(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
+                any(EndpointDTO.class))).thenReturn(computationalStartDTO);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
+                .thenReturn("someUuid");
+        when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 
-		computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, "");
+        computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, PROJECT);
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStarting, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verify(requestBuilder).newComputationalStart(refEq(userInfo), refEq(exploratory), eq(COMP_NAME));
-		verify(provisioningService)
-				.post(eq(endpointDTO().getUrl() + "computational/start/spark"), eq(TOKEN),
-						refEq(computationalStartDTO),
-						eq(String.class));
-		verify(requestId).put(USER, "someUuid");
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
-				provisioningService, requestId);
-	}
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStarting, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verify(requestBuilder).newComputationalStart(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
+        verify(provisioningService)
+                .post(eq(endpointDTO().getUrl() + "computational/start/spark"), eq(TOKEN),
+                        refEq(computationalStartDTO),
+                        eq(String.class));
+        verify(requestId).put(USER, "someUuid");
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
+                provisioningService, requestId);
+    }
 
 	@Test
 	public void startSparkClusterWhenDataengineStatusIsRunning() {
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		userInstanceDto.setResources(singletonList(getUserComputationalResource(RUNNING,
-				DOCKER_DLAB_DATAENGINE_SERVICE)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        userInstanceDto.setResources(singletonList(getUserComputationalResource(RUNNING,
+                DOCKER_DLAB_DATAENGINE_SERVICE)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
 
-		expectedException.expect(IllegalStateException.class);
-		expectedException.expectMessage("There is no stopped dataengine compName for exploratory expName");
+        expectedException.expect(IllegalStateException.class);
+        expectedException.expectMessage("There is no stopped dataengine compName for exploratory expName");
 
-		computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, "");
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void updateComputationalsReuploadKeyFlag() {
-		doNothing().when(computationalDAO).updateReuploadKeyFlagForComputationalResources(anyString(), any(List.class),
-				any(List.class), anyBoolean(), anyVararg());
-
-		computationalService.updateComputationalsReuploadKeyFlag(USER, singletonList(RUNNING),
-				singletonList(DataEngineType.SPARK_STANDALONE), true, RUNNING);
-
-		verify(computationalDAO).updateReuploadKeyFlagForComputationalResources(USER, singletonList
-						(RUNNING),
-				singletonList(DataEngineType.SPARK_STANDALONE), true, RUNNING);
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, PROJECT);
+    }
 
 	@Test
 	public void getComputationalResource() {
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+        when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
 
-		Optional<UserComputationalResource> expectedResource = Optional.of(ucResource);
-		Optional<UserComputationalResource> actualResource =
-				computationalService.getComputationalResource(USER, EXPLORATORY_NAME, COMP_NAME);
-		assertEquals(expectedResource, actualResource);
+        Optional<UserComputationalResource> expectedResource = Optional.of(ucResource);
+        Optional<UserComputationalResource> actualResource =
+                computationalService.getComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        assertEquals(expectedResource, actualResource);
 
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        verifyNoMoreInteractions(computationalDAO);
+    }
 
 	@Test
 	public void getComputationalResourceWithException() {
-		doThrow(new DlabException("Computational resource not found"))
-				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
+        doThrow(new DlabException("Computational resource not found"))
+                .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
 
-		Optional<UserComputationalResource> expectedResource = Optional.empty();
-		Optional<UserComputationalResource> actualResource =
-				computationalService.getComputationalResource(USER, EXPLORATORY_NAME, COMP_NAME);
-		assertEquals(expectedResource, actualResource);
+        Optional<UserComputationalResource> expectedResource = Optional.empty();
+        Optional<UserComputationalResource> actualResource =
+                computationalService.getComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        assertEquals(expectedResource, actualResource);
 
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        verifyNoMoreInteractions(computationalDAO);
+    }
 
 	@Test
 	public void testUpdateSparkClusterConfig() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		final ComputationalClusterConfigDTO clusterConfigDTO = new ComputationalClusterConfigDTO();
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
-		userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(RUNNING, COMP_NAME)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
-		when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(ClusterConfig.class))).thenReturn(clusterConfigDTO);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalClusterConfigDTO.class), any()))
-				.thenReturn("someUuid");
-		computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME,
-				config);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        final ComputationalClusterConfigDTO clusterConfigDTO = new ComputationalClusterConfigDTO();
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+        userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(RUNNING, COMP_NAME)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+        when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(ClusterConfig.class), any(EndpointDTO.class)))
+                .thenReturn(clusterConfigDTO);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalClusterConfigDTO.class), any()))
+                .thenReturn("someUuid");
+        computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+                COMP_NAME, config);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(userInstanceDto),
-				refEq(getUserComputationalResource(RUNNING, COMP_NAME)),
-				eq(Collections.singletonList(new ClusterConfig())));
-		verify(requestId).put(USER, "someUuid");
-		verify(computationalDAO).updateComputationalFields(refEq(new ComputationalStatusDTO()
-				.withConfig(config)
-				.withUser(USER)
-				.withExploratoryName(EXPLORATORY_NAME)
-				.withComputationalName(COMP_NAME)
-				.withStatus(UserInstanceStatus.RECONFIGURING.toString()), "self"));
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "computational/spark/reconfigure"),
-				eq(getUserInfo().getAccessToken()),
-				refEq(new ComputationalClusterConfigDTO()), eq(String.class));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(userInstanceDto),
+                refEq(getUserComputationalResource(RUNNING, COMP_NAME)),
+                eq(Collections.singletonList(new ClusterConfig())), eq(endpointDTO()));
+        verify(requestId).put(USER, "someUuid");
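+        // The expected status DTO must now carry the project, otherwise refEq(..., "self") would not match.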
+        verify(computationalDAO).updateComputationalFields(refEq(new ComputationalStatusDTO()
+                .withProject(PROJECT)
+                .withConfig(config)
+                .withUser(USER)
+                .withExploratoryName(EXPLORATORY_NAME)
+                .withComputationalName(COMP_NAME)
+                .withStatus(UserInstanceStatus.RECONFIGURING.toString()), "self"));
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "computational/spark/reconfigure"),
+                eq(getUserInfo().getAccessToken()),
+                refEq(new ComputationalClusterConfigDTO()), eq(String.class));
 
-	}
+    }
 
 	@Test
 	public void testUpdateSparkClusterConfigWhenClusterIsNotRunning() {
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
-		userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
-		try {
-			computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME,
-					config);
-		} catch (ResourceNotFoundException e) {
-			assertEquals("Running computational resource with name compName for exploratory expName not found",
-					e.getMessage());
-		}
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+        userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+        try {
+            computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+                    COMP_NAME, config);
+        } catch (ResourceNotFoundException e) {
+            assertEquals("Running computational resource with name compName for exploratory expName not found",
+                    e.getMessage());
+        }
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verifyNoMoreInteractions(exploratoryDAO);
-		verifyZeroInteractions(provisioningService, requestBuilder, requestId);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verifyNoMoreInteractions(exploratoryDAO);
+        verifyZeroInteractions(provisioningService, requestBuilder, requestId);
 
-	}
+    }
 
 	@Test
 	public void testUpdateSparkClusterConfigWhenClusterIsNotFound() {
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
-		userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
-		try {
-			computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME + "X",
-					config);
-		} catch (ResourceNotFoundException e) {
-			assertEquals("Running computational resource with name compNameX for exploratory expName not found",
-					e.getMessage());
-		}
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+        userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+        try {
+            computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+                    COMP_NAME + "X", config);
+        } catch (ResourceNotFoundException e) {
+            assertEquals("Running computational resource with name compNameX for exploratory expName not found",
+                    e.getMessage());
+        }
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verifyNoMoreInteractions(exploratoryDAO);
-		verifyZeroInteractions(provisioningService, requestBuilder, requestId);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verifyNoMoreInteractions(exploratoryDAO);
+        verifyZeroInteractions(provisioningService, requestBuilder, requestId);
 
-	}
+    }
 
 	@Test
 	public void testGetClusterConfig() {
-		when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
+        when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
 
-		final List<ClusterConfig> clusterConfig = computationalService.getClusterConfig(getUserInfo(),
-				EXPLORATORY_NAME, COMP_NAME);
-		final ClusterConfig config = clusterConfig.get(0);
+        final List<ClusterConfig> clusterConfig = computationalService.getClusterConfig(getUserInfo(), PROJECT,
+                EXPLORATORY_NAME, COMP_NAME);
+        final ClusterConfig config = clusterConfig.get(0);
 
-		assertEquals(1, clusterConfig.size());
-		assertEquals("test", config.getClassification());
-		assertNull(config.getConfigurations());
-		assertNull(config.getProperties());
-	}
+        assertEquals(1, clusterConfig.size());
+        assertEquals("test", config.getClassification());
+        assertNull(config.getConfigurations());
+        assertNull(config.getProperties());
+    }
 
 
 	@Test
 	public void testGetClusterConfigWithException() {
-		when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString())).thenThrow(new RuntimeException(
-				"Exception"));
+        when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString(), anyString())).thenThrow(new RuntimeException(
+                "Exception"));
 
-		expectedException.expectMessage("Exception");
-		expectedException.expect(RuntimeException.class);
-		computationalService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME);
-	}
+        expectedException.expectMessage("Exception");
+        expectedException.expect(RuntimeException.class);
+        computationalService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME, COMP_NAME);
+    }
 
 	private ClusterConfig getClusterConfig() {
 		final ClusterConfig config = new ClusterConfig();
@@ -681,26 +711,29 @@
 
 	private UserInstanceDTO getUserInstanceDto() {
 		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
-				.withExploratoryId("explId")
+                .withExploratoryId("explId")
+                .withProject(PROJECT)
 				.withTags(Collections.emptyMap());
 	}
 
 	private List<ComputationalCreateFormDTO> getFormList() {
-		SparkStandaloneClusterCreateForm sparkClusterForm = new SparkStandaloneClusterCreateForm();
-		sparkClusterForm.setNotebookName(EXPLORATORY_NAME);
-		sparkClusterForm.setName(COMP_NAME);
-		sparkClusterForm.setDataEngineInstanceCount(String.valueOf(2));
-		sparkClusterForm.setImage("dataengine");
-		ComputationalCreateFormDTO desClusterForm = new ComputationalCreateFormDTO();
-		desClusterForm.setNotebookName(EXPLORATORY_NAME);
-		desClusterForm.setName(COMP_NAME);
+        SparkStandaloneClusterCreateForm sparkClusterForm = new SparkStandaloneClusterCreateForm();
+        sparkClusterForm.setNotebookName(EXPLORATORY_NAME);
+        sparkClusterForm.setName(COMP_NAME);
+        sparkClusterForm.setProject(PROJECT);
+        sparkClusterForm.setDataEngineInstanceCount(String.valueOf(2));
+        sparkClusterForm.setImage("dataengine");
+        ComputationalCreateFormDTO desClusterForm = new ComputationalCreateFormDTO();
+        desClusterForm.setNotebookName(EXPLORATORY_NAME);
+        desClusterForm.setName(COMP_NAME);
 
-		return Arrays.asList(sparkClusterForm, desClusterForm);
-	}
+        return Arrays.asList(sparkClusterForm, desClusterForm);
+    }
 
 	private ComputationalStatusDTO getComputationalStatusDTOWithStatus(String status) {
 		return new ComputationalStatusDTO()
-				.withUser(USER)
+                .withUser(USER)
+                .withProject(PROJECT)
 				.withExploratoryName(EXPLORATORY_NAME)
 				.withComputationalName(COMP_NAME)
 				.withStatus(UserInstanceStatus.of(status));
@@ -717,7 +750,7 @@
 	}
 
 	private EndpointDTO endpointDTO() {
-		return new EndpointDTO("test", "url", "", null);
+		return new EndpointDTO("test", "url", "", null, EndpointDTO.EndpointStatus.ACTIVE, CloudProvider.AWS);
 	}
 
 
@@ -737,4 +770,9 @@
 		return ucResource;
 	}
 
+    private ProjectDTO getProjectDTO() {
+        return new ProjectDTO(PROJECT, Collections.emptySet(), "", "", null,
+                singletonList(new ProjectEndpointDTO("endpoint", UserInstanceStatus.RUNNING,
+                        new EdgeInfo())), true);
+    }
 }
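
The tests above exercise the project-scoped service API: every DAO lookup and
service call now carries the project name in front of the exploratory name. A
minimal sketch of the stub/verify pattern, assuming the Mockito 1.x matchers
and the four-argument fetchExploratoryFields(...) signature used in this diff:

    @Test
    public void projectScopedLookupSketch() {
        // Stub: the new project argument is matched like any other string.
        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
                .thenReturn(getUserInstanceDto());

        computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
                COMP_NAME, Collections.singletonList(new ClusterConfig()));

        // Verify: the concrete project is pinned between user and exploratory name.
        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
    }
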
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EdgeServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EdgeServiceImplTest.java
deleted file mode 100644
index 09784eb..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EdgeServiceImplTest.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.impl;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.KeyDAO;
-import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.dto.ResourceSysBaseDTO;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
-
-@RunWith(MockitoJUnitRunner.class)
-public class EdgeServiceImplTest {
-
-	private final String USER = "test";
-	private final String TOKEN = "token";
-	private final String UUID = "1234-56789765-4321";
-	private final String STATUS_STOPPED = "stopped";
-	private final String STATUS_RUNNING = "running";
-	private UserInfo userInfo;
-
-	@Mock
-	private KeyDAO keyDAO;
-	@Mock
-	private RESTService provisioningService;
-	@Mock
-	private RequestBuilder requestBuilder;
-	@Mock
-	private RequestId requestId;
-
-	@InjectMocks
-	private EdgeServiceImpl edgeService;
-
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
-
-	@Before
-	public void setUp() {
-		userInfo = getUserInfo();
-	}
-
-	@Test
-	public void start() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_STOPPED);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-		ResourceSysBaseDTO rsbDto = new ResourceSysBaseDTO();
-		when(requestBuilder.newEdgeAction(any(UserInfo.class))).thenReturn(rsbDto);
-		String edgeStart = "infrastructure/edge/start";
-		when(provisioningService.post(anyString(), anyString(), any(ResourceSysBaseDTO.class), any()))
-				.thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
-
-		String uuid = edgeService.start(userInfo);
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
-
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(keyDAO).updateEdgeStatus(USER, "starting");
-		verify(requestBuilder).newEdgeAction(userInfo);
-		verify(provisioningService).post(edgeStart, TOKEN, rsbDto, String.class);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(keyDAO, requestBuilder, provisioningService, requestId);
-	}
-
-	@Test
-	public void startWithInappropriateEdgeStatus() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_RUNNING);
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not start EDGE node because the status of instance is running");
-
-		edgeService.start(userInfo);
-	}
-
-	@Test
-	public void startWhenMethodNewEdgeActionThrowsException() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_STOPPED);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newEdgeAction(any(UserInfo.class));
-		try {
-			edgeService.start(userInfo);
-		} catch (DlabException e) {
-			assertEquals("Could not start EDGE node: Could not infrastructure/edge/start EDGE node : " +
-					"Cannot create instance of resource class ", e.getMessage());
-		}
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(keyDAO).updateEdgeStatus(USER, "starting");
-		verify(keyDAO).updateEdgeStatus(USER, "failed");
-		verify(requestBuilder).newEdgeAction(userInfo);
-		verifyNoMoreInteractions(keyDAO, requestBuilder);
-	}
-
-	@Test
-	public void stop() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_RUNNING);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-		ResourceSysBaseDTO rsbDto = new ResourceSysBaseDTO();
-		when(requestBuilder.newEdgeAction(any(UserInfo.class))).thenReturn(rsbDto);
-		String edgeStop = "infrastructure/edge/stop";
-		when(provisioningService.post(anyString(), anyString(), any(ResourceSysBaseDTO.class), any())).thenReturn
-				(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
-
-		String uuid = edgeService.stop(userInfo);
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
-
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(keyDAO).updateEdgeStatus(USER, "stopping");
-		verify(requestBuilder).newEdgeAction(userInfo);
-		verify(provisioningService).post(edgeStop, TOKEN, rsbDto, String.class);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(keyDAO, requestBuilder, provisioningService, requestId);
-	}
-
-	@Test
-	public void stopWithInappropriateEdgeStatus() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_STOPPED);
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not stop EDGE node because the status of instance is stopped");
-
-		edgeService.stop(userInfo);
-	}
-
-	@Test
-	public void stopWhenMethodNewEdgeActionThrowsException() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_RUNNING);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newEdgeAction(any(UserInfo.class));
-		try {
-			edgeService.stop(userInfo);
-		} catch (DlabException e) {
-			assertEquals("Could not stop EDGE node: Could not infrastructure/edge/stop EDGE node : " +
-					"Cannot create instance of resource class ", e.getMessage());
-		}
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(keyDAO).updateEdgeStatus(USER, "stopping");
-		verify(keyDAO).updateEdgeStatus(USER, "failed");
-		verify(requestBuilder).newEdgeAction(userInfo);
-		verifyNoMoreInteractions(keyDAO, requestBuilder);
-	}
-
-	@Test
-	public void terminate() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_RUNNING);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-		ResourceSysBaseDTO rsbDto = new ResourceSysBaseDTO();
-		when(requestBuilder.newEdgeAction(any(UserInfo.class))).thenReturn(rsbDto);
-		String edgeTerminate = "infrastructure/edge/terminate";
-		when(provisioningService.post(anyString(), anyString(), any(ResourceSysBaseDTO.class), any()))
-				.thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
-
-		String uuid = edgeService.terminate(userInfo);
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
-
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(keyDAO).updateEdgeStatus(USER, "terminating");
-		verify(requestBuilder).newEdgeAction(userInfo);
-		verify(provisioningService).post(edgeTerminate, TOKEN, rsbDto, String.class);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(keyDAO, requestBuilder, provisioningService, requestId);
-	}
-
-	@Test
-	public void terminateWithInappropriateEdgeStatus() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(anyString());
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Could not terminate EDGE node because the status of instance is null");
-
-		edgeService.terminate(userInfo);
-	}
-
-	@Test
-	public void terminateWhenMethodNewEdgeActionThrowsException() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STATUS_RUNNING);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newEdgeAction(any(UserInfo.class));
-		try {
-			edgeService.terminate(userInfo);
-		} catch (DlabException e) {
-			assertEquals("Could not terminate EDGE node: Could not infrastructure/edge/terminate EDGE node : " +
-					"Cannot create instance of resource class ", e.getMessage());
-		}
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(keyDAO).updateEdgeStatus(USER, "terminating");
-		verify(keyDAO).updateEdgeStatus(USER, "failed");
-		verify(requestBuilder).newEdgeAction(userInfo);
-		verifyNoMoreInteractions(keyDAO, requestBuilder);
-	}
-
-	@Test
-	public void updateReuploadKeyFlag() {
-		doNothing().when(keyDAO).updateEdgeReuploadKey(anyString(), anyBoolean(), anyVararg());
-		edgeService.updateReuploadKeyFlag(USER, true, UserInstanceStatus.RUNNING);
-
-		verify(keyDAO).updateEdgeReuploadKey(USER, true, UserInstanceStatus.RUNNING);
-		verifyNoMoreInteractions(keyDAO);
-	}
-
-	private UserInfo getUserInfo() {
-		return new UserInfo(USER, TOKEN);
-	}
-}
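
EdgeServiceImplTest is deleted because the per-user EDGE lifecycle (KeyDAO
status checks plus edgeService.start/stop/terminate) is gone; edge nodes are
now managed per project. A minimal sketch of the replacement flow, assuming
the ProjectService API stubbed in EnvironmentServiceImplTest below, where
projectService.stop is a void method taking a UserInfo and two strings:

    @Test
    public void projectLevelStopSketch() {
        when(projectService.get(anyString())).thenReturn(getProjectDTO());
        doNothing().when(projectService).stop(any(UserInfo.class), anyString(), anyString());

        environmentService.stopProjectEnvironment(PROJECT_NAME);

        // The edge is stopped through the project, not through a per-user EDGE service.
        verify(projectService).get(eq(PROJECT_NAME));
    }
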
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
index 1df2245..460c2f9 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
@@ -22,21 +22,18 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.EnvDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
 import com.epam.dlab.backendapi.dao.UserSettingsDAO;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
 import com.epam.dlab.backendapi.resources.dto.UserDTO;
 import com.epam.dlab.backendapi.service.ComputationalService;
-import com.epam.dlab.backendapi.service.EdgeService;
 import com.epam.dlab.backendapi.service.ExploratoryService;
-import com.epam.dlab.backendapi.service.SecurityService;
 import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.backendapi.service.SecurityService;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.base.edge.EdgeInfo;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.exceptions.ResourceConflictException;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -45,11 +42,24 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anySet;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class EnvironmentServiceImplTest {
@@ -59,8 +69,6 @@
 	private static final String EXPLORATORY_NAME_2 = "expName2";
 	private static final String TOKEN = "token";
 	private static final String UUID = "213-12312-321";
-	private static final String RUNNING_STATE = "running";
-	private static final String STOPPED_STATE = "stopped";
 	private static final String PROJECT_NAME = "projectName";
 	private static final String ENDPOINT_NAME = "endpointName";
 	private static final String ADMIN = "admin";
@@ -76,10 +84,6 @@
 	@Mock
 	private ComputationalService computationalService;
 	@Mock
-	private EdgeService edgeService;
-	@Mock
-	private KeyDAO keyDAO;
-	@Mock
 	private UserSettingsDAO userSettingsDAO;
 	@Mock
 	private ProjectService projectService;
@@ -118,93 +122,6 @@
 		environmentService.getUsers();
 	}
 
-	@Test
-	public void getAllUsers() {
-		doReturn(Collections.singleton(USER)).when(envDAO).fetchAllUsers();
-		final Set<String> users = environmentService.getUserNames();
-
-		assertEquals(1, users.size());
-		assertTrue(users.contains(USER));
-
-		verify(envDAO).fetchAllUsers();
-		verifyNoMoreInteractions(envDAO);
-	}
-
-	@Test
-	public void getAllUsersWithException() {
-		doThrow(new DlabException("Users not found")).when(envDAO).fetchAllUsers();
-
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Users not found");
-
-		environmentService.getUserNames();
-	}
-
-
-	@Test
-	public void stopEnvironment() {
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString())).thenReturn(getUserInstances());
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(RUNNING_STATE);
-		when(edgeService.stop(any(UserInfo.class))).thenReturn(UUID);
-
-		environmentService.stopEnvironment(USER);
-
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
-		verify(securityService, times(3)).getUserInfoOffline(USER);
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
-		verify(keyDAO, times(2)).getEdgeStatus(USER);
-		verify(edgeService).stop(refEq(userInfo));
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(keyDAO, exploratoryDAO, edgeService, exploratoryService);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void stopEnvironmentWithWrongResourceState() {
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg()))
-				.thenReturn(getUserInstances());
-		expectedException.expect(ResourceConflictException.class);
-
-		environmentService.stopEnvironment(USER);
-	}
-
-	@Test
-	public void stopEnvironmentWithEdgeStarting() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn("starting");
-		expectedException.expect(ResourceConflictException.class);
-
-		environmentService.stopEnvironment(USER);
-	}
-
-	@Test
-	public void stopEnvironmentWithoutEdge() {
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString())).thenReturn(getUserInstances());
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(STOPPED_STATE);
-		when(edgeService.stop(any(UserInfo.class))).thenReturn(UUID);
-
-		environmentService.stopEnvironment(USER);
-
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
-		verify(securityService, times(2)).getUserInfoOffline(USER);
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
-		verify(keyDAO, times(2)).getEdgeStatus(USER);
-		verify(edgeService, never()).stop(refEq(userInfo));
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(keyDAO, envDAO, exploratoryDAO, edgeService, exploratoryService);
-	}
 
 	@Test
 	public void stopProjectEnvironment() {
@@ -212,15 +129,15 @@
 		final ProjectDTO projectDTO = getProjectDTO();
 		when(exploratoryDAO.fetchRunningExploratoryFieldsForProject(anyString())).thenReturn(getUserInstances());
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(userInfo);
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
 		when(projectService.get(anyString())).thenReturn(projectDTO);
 		doNothing().when(projectService).stop(any(UserInfo.class), anyString(), anyString());
 
 		environmentService.stopProjectEnvironment(PROJECT_NAME);
 
 		verify(exploratoryDAO).fetchRunningExploratoryFieldsForProject(PROJECT_NAME);
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
+		verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
+		verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_2));
 		verify(securityService, times(2)).getServiceAccountInfo(USER);
 		verify(securityService).getServiceAccountInfo(ADMIN);
 		verify(projectService).get(eq(PROJECT_NAME));
@@ -232,232 +149,48 @@
 	}
 
 	@Test
-	public void stopEdge() {
-		final UserInfo userInfo = getUserInfo();
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn(RUNNING_STATE);
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(edgeService.stop(any(UserInfo.class))).thenReturn(UUID);
-
-		environmentService.stopEdge(USER);
-
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(securityService).getUserInfoOffline(USER);
-		verify(edgeService).stop(refEq(userInfo));
-		verifyNoMoreInteractions(keyDAO, securityService, edgeService);
-	}
-
-	@Test
-	public void stopEdgeWhenItIsNotRunning() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn("starting");
-
-		environmentService.stopEdge(USER);
-
-		verify(keyDAO).getEdgeStatus(USER);
-		verifyZeroInteractions(securityService, edgeService);
-		verifyNoMoreInteractions(keyDAO);
-	}
-
-	@Test
 	public void stopExploratory() {
 		final UserInfo userInfo = getUserInfo();
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
 
-		environmentService.stopExploratory(USER, EXPLORATORY_NAME_1);
+		environmentService.stopExploratory(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1);
 
-		verify(securityService).getUserInfoOffline(USER);
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
+		verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
 		verifyNoMoreInteractions(securityService, exploratoryService);
 	}
 
 	@Test
 	public void stopComputational() {
 		final UserInfo userInfo = getUserInfo();
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		doNothing().when(computationalService).stopSparkCluster(any(UserInfo.class), anyString(), anyString());
+		doNothing().when(computationalService).stopSparkCluster(any(UserInfo.class), anyString(), anyString(), anyString());
 
-		environmentService.stopComputational(USER, EXPLORATORY_NAME_1, "compName");
+		environmentService.stopComputational(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1, "compName");
 
-		verify(securityService).getUserInfoOffline(USER);
-		verify(computationalService).stopSparkCluster(refEq(userInfo), eq(EXPLORATORY_NAME_1), eq("compName"));
+		verify(computationalService).stopSparkCluster(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1), eq("compName"));
 		verifyNoMoreInteractions(securityService, computationalService);
 	}
 
 	@Test
-	@SuppressWarnings("unchecked")
-	public void terminateAll() {
-		doReturn(Collections.singleton(USER)).when(envDAO).fetchAllUsers();
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg()))
-				.thenReturn(Collections.emptyList());
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn(UUID);
-		when(keyDAO.edgeNodeExist(anyString())).thenReturn(true);
-		when(edgeService.terminate(any(UserInfo.class))).thenReturn(UUID);
-
-		environmentService.terminateAll();
-
-		verify(envDAO).fetchAllUsers();
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg());
-		verify(securityService).getUserInfoOffline(USER);
-		verify(keyDAO).edgeNodeExist(USER);
-		verify(edgeService).terminate(refEq(userInfo));
-		verify(exploratoryService).updateExploratoryStatuses(USER, UserInstanceStatus.TERMINATING);
-		verify(keyDAO).getEdgeStatus(userInfo.getName());
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(keyDAO, envDAO, exploratoryDAO, edgeService, exploratoryService);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void terminateAllWithoutEdge() {
-		doReturn(Collections.singleton(USER)).when(envDAO).fetchAllUsers();
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class),
-				eq(UserInstanceStatus.CREATING), eq(UserInstanceStatus.STARTING))).thenReturn(Collections.emptyList());
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), eq(UserInstanceStatus.TERMINATED),
-				eq(UserInstanceStatus.FAILED), eq(UserInstanceStatus.TERMINATING))).thenReturn(getUserInstances());
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn(UUID);
-		when(keyDAO.edgeNodeExist(anyString())).thenReturn(false);
-		when(edgeService.terminate(any(UserInfo.class))).thenReturn(UUID);
-
-		environmentService.terminateAll();
-
-		verify(envDAO).fetchAllUsers();
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER, UserInstanceStatus.TERMINATED,
-				UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATING);
-		verify(securityService, times(2)).getUserInfoOffline(USER);
-		verify(exploratoryService).terminate(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).terminate(refEq(userInfo), eq(EXPLORATORY_NAME_2));
-		verify(keyDAO).edgeNodeExist(USER);
-		verify(edgeService, never()).terminate(refEq(userInfo));
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(keyDAO, envDAO, exploratoryDAO, edgeService, exploratoryService);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void terminateAllWithWrongResourceState() {
-		doReturn(Collections.singleton(USER)).when(envDAO).fetchAllUsers();
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg()))
-				.thenReturn(getUserInstances());
-		expectedException.expect(ResourceConflictException.class);
-
-		environmentService.terminateAll();
-	}
-
-	@Test
-	public void terminateAllWithEdgeStarting() {
-		doReturn(Collections.singleton(USER)).when(envDAO).fetchAllUsers();
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn("starting");
-		expectedException.expect(ResourceConflictException.class);
-
-		environmentService.terminateAll();
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void terminateEnvironment() {
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg()))
-				.thenReturn(Collections.emptyList());
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn(UUID);
-		when(keyDAO.edgeNodeExist(anyString())).thenReturn(true);
-		when(edgeService.terminate(any(UserInfo.class))).thenReturn(UUID);
-
-		environmentService.terminateEnvironment(USER);
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg());
-		verify(securityService).getUserInfoOffline(USER);
-		verify(keyDAO).edgeNodeExist(USER);
-		verify(edgeService).terminate(refEq(userInfo));
-		verify(exploratoryService).updateExploratoryStatuses(USER, UserInstanceStatus.TERMINATING);
-		verify(keyDAO).getEdgeStatus(userInfo.getName());
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(keyDAO, envDAO, exploratoryDAO, edgeService, exploratoryService);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void terminateEnvironmentWithoutEdge() {
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class),
-				eq(UserInstanceStatus.CREATING), eq(UserInstanceStatus.STARTING))).thenReturn(Collections.emptyList());
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), eq(UserInstanceStatus.TERMINATED),
-				eq(UserInstanceStatus.FAILED), eq(UserInstanceStatus.TERMINATING))).thenReturn(getUserInstances());
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn(UUID);
-		when(keyDAO.edgeNodeExist(anyString())).thenReturn(false);
-		when(edgeService.terminate(any(UserInfo.class))).thenReturn(UUID);
-
-		environmentService.terminateEnvironment(USER);
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER, UserInstanceStatus.TERMINATED,
-				UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATING);
-		verify(securityService, times(2)).getUserInfoOffline(USER);
-		verify(exploratoryService).terminate(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).terminate(refEq(userInfo), eq(EXPLORATORY_NAME_2));
-		verify(keyDAO).edgeNodeExist(USER);
-		verify(edgeService, never()).terminate(refEq(userInfo));
-		verify(keyDAO).getEdgeStatus(USER);
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(keyDAO, envDAO, exploratoryDAO, edgeService, exploratoryService);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void terminateEnvironmentWithWrongResourceState() {
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg()))
-				.thenReturn(getUserInstances());
-		expectedException.expect(ResourceConflictException.class);
-
-		environmentService.terminateEnvironment(USER);
-	}
-
-	@Test
-	public void terminateEnvironmentWithEdgeStarting() {
-		when(keyDAO.getEdgeStatus(anyString())).thenReturn("starting");
-		expectedException.expect(ResourceConflictException.class);
-
-		environmentService.terminateEnvironment(USER);
-	}
-
-	@Test
 	public void terminateExploratory() {
 		final UserInfo userInfo = getUserInfo();
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn(UUID);
+		when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
 
-		environmentService.terminateExploratory(USER, EXPLORATORY_NAME_1);
+		environmentService.terminateExploratory(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1);
 
-		verify(securityService).getUserInfoOffline(USER);
-		verify(exploratoryService).terminate(refEq(userInfo), eq(EXPLORATORY_NAME_1));
+		verify(exploratoryService).terminate(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
 		verifyNoMoreInteractions(securityService, exploratoryService);
 	}
 
 	@Test
 	public void terminateComputational() {
 		final UserInfo userInfo = getUserInfo();
-		when(securityService.getUserInfoOffline(anyString())).thenReturn(userInfo);
 		doNothing().when(computationalService)
-				.terminateComputational(any(UserInfo.class), anyString(), anyString());
+				.terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
 
-		environmentService.terminateComputational(USER, EXPLORATORY_NAME_1, "compName");
+		environmentService.terminateComputational(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1, "compName");
 
-		verify(securityService).getUserInfoOffline(USER);
 		verify(computationalService)
-				.terminateComputational(refEq(userInfo), eq(EXPLORATORY_NAME_1), eq("compName"));
+				.terminateComputational(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1), eq("compName"));
 		verifyNoMoreInteractions(securityService, computationalService);
 	}
 
@@ -467,13 +200,13 @@
 
 	private List<UserInstanceDTO> getUserInstances() {
 		return Arrays.asList(
-				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_1).withUser(USER).withProject("prj"),
-				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_2).withUser(USER).withProject("prj"));
+				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_1).withUser(USER).withProject(PROJECT_NAME),
+				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_2).withUser(USER).withProject(PROJECT_NAME));
 	}
 
 	private ProjectDTO getProjectDTO() {
 		return new ProjectDTO(PROJECT_NAME, Collections.emptySet(), "", "", null,
 				Collections.singletonList(new ProjectEndpointDTO(ENDPOINT_NAME, UserInstanceStatus.RUNNING,
-						new EdgeInfo())));
+						new EdgeInfo())), true);
 	}
 }
\ No newline at end of file
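
Both test classes now build ProjectDTO with a list of ProjectEndpointDTO
descriptors plus an extra trailing boolean. A minimal sketch of the updated
factory, assuming the trailing flag toggles some project-level option (its
meaning is not shown in this diff):

    private ProjectDTO getProjectDTO() {
        return new ProjectDTO(PROJECT_NAME, Collections.emptySet(), "", "", null,
                Collections.singletonList(new ProjectEndpointDTO(ENDPOINT_NAME,
                        UserInstanceStatus.RUNNING, new EdgeInfo())), true);
    }
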
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
index aa02ccd..5d21167 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
@@ -24,16 +24,26 @@
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.GitCredsDAO;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.TagService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.StatusEnvBaseDTO;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
+import com.epam.dlab.dto.base.edge.EdgeInfo;
 import com.epam.dlab.dto.computational.UserComputationalResource;
-import com.epam.dlab.dto.exploratory.*;
+import com.epam.dlab.dto.exploratory.ExploratoryActionDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryCreateDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryGitCredsDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryGitCredsUpdateDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryReconfigureSparkClusterActionDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryStatusDTO;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.epam.dlab.model.exploratory.Exploratory;
@@ -55,27 +65,46 @@
 import static java.util.Collections.singletonList;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.anyMapOf;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.anyVararg;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class ExploratoryServiceImplTest {
 
-	private final String USER = "test";
-	private final String TOKEN = "token";
-	private final String EXPLORATORY_NAME = "expName";
-	private final String UUID = "1234-56789765-4321";
+    private final String USER = "test";
+    private final String TOKEN = "token";
+    private final String PROJECT = "project";
+    private final String EXPLORATORY_NAME = "expName";
+    private final String UUID = "1234-56789765-4321";
+    private static final String ENDPOINT_NAME = "endpointName";
 
-	private UserInfo userInfo;
-	private UserInstanceDTO userInstance;
-	private StatusEnvBaseDTO statusEnvBaseDTO;
 
-	@Mock
-	private ExploratoryDAO exploratoryDAO;
-	@Mock
-	private ComputationalDAO computationalDAO;
-	@Mock
-	private GitCredsDAO gitCredsDAO;
-	@Mock
+    private UserInfo userInfo;
+    private UserInstanceDTO userInstance;
+    private StatusEnvBaseDTO statusEnvBaseDTO;
+
+    @Mock
+    private ProjectService projectService;
+    @Mock
+    private ExploratoryDAO exploratoryDAO;
+    @Mock
+    private ComputationalDAO computationalDAO;
+    @Mock
+    private GitCredsDAO gitCredsDAO;
+    @Mock
 	private RESTService provisioningService;
 	@Mock
 	private RequestBuilder requestBuilder;
@@ -100,172 +129,176 @@
 
 	@Test
 	public void start() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ExploratoryGitCredsDTO egcDtoMock = mock(ExploratoryGitCredsDTO.class);
-		when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDtoMock);
+        ExploratoryGitCredsDTO egcDtoMock = mock(ExploratoryGitCredsDTO.class);
+        when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDtoMock);
 
-		ExploratoryActionDTO egcuDto = new ExploratoryGitCredsUpdateDTO();
-		egcuDto.withExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newExploratoryStart(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(ExploratoryGitCredsDTO.class))).thenReturn(egcuDto);
+        ExploratoryActionDTO egcuDto = new ExploratoryGitCredsUpdateDTO();
+        egcuDto.withExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newExploratoryStart(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                any(ExploratoryGitCredsDTO.class))).thenReturn(egcuDto);
 
-		String exploratoryStart = "exploratory/start";
-		when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
-				.thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        String exploratoryStart = "exploratory/start";
+        when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		String uuid = exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
+        String uuid = exploratoryService.start(userInfo, EXPLORATORY_NAME, PROJECT);
+        assertNotNull(uuid);
+        assertEquals(UUID, uuid);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
 
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStart, TOKEN, egcuDto, String.class);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
-	}
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStart, TOKEN, egcuDto, String.class);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
+    }
 
 	@Test
 	public void startWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
-		try {
-			exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
-		} catch (DlabException e) {
-			assertEquals("Could not exploratory/start exploratory environment expName: Exploratory for user with " +
-					"name not found", e.getMessage());
-		}
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+        try {
+            exploratoryService.start(userInfo, EXPLORATORY_NAME, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Could not start exploratory environment expName: Exploratory for user with " +
+                    "name not found", e.getMessage());
+        }
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void stop() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
-		eaDto.withExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class))).thenReturn(eaDto);
+        ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
+        eaDto.withExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
+                .thenReturn(eaDto);
 
-		String exploratoryStop = "exploratory/stop";
-		when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
-				(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        String exploratoryStop = "exploratory/stop";
+        when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		String uuid = exploratoryService.stop(userInfo, EXPLORATORY_NAME);
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
+        String uuid = exploratoryService.stop(userInfo, PROJECT, EXPLORATORY_NAME);
+        assertNotNull(uuid);
+        assertEquals(UUID, uuid);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
 
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStop, TOKEN, eaDto, String.class);
-		verify(computationalDAO).updateComputationalStatusesForExploratory(userInfo.getName(), EXPLORATORY_NAME,
-				UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING, UserInstanceStatus.FAILED,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
-	}
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStop, TOKEN, eaDto, String.class);
+        verify(computationalDAO).updateComputationalStatusesForExploratory(userInfo.getName(), PROJECT,
+                EXPLORATORY_NAME, UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING,
+                UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
+    }
 
 	@Test
 	public void stopWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
-		try {
-			exploratoryService.stop(userInfo, EXPLORATORY_NAME);
-		} catch (DlabException e) {
-			assertEquals("Could not exploratory/stop exploratory environment expName: Exploratory for user with " +
-					"name not found", e.getMessage());
-		}
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+        try {
+            exploratoryService.stop(userInfo, PROJECT, EXPLORATORY_NAME);
+        } catch (DlabException e) {
+            assertEquals("Could not stop exploratory environment expName: Exploratory for user with " +
+                    "name not found", e.getMessage());
+        }
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void terminate() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
-		eaDto.withExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class))).thenReturn(eaDto);
+        ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
+        eaDto.withExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
+                .thenReturn(eaDto);
 
-		String exploratoryTerminate = "exploratory/terminate";
-		when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
-				(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        String exploratoryTerminate = "exploratory/terminate";
+        when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		String uuid = exploratoryService.terminate(userInfo, EXPLORATORY_NAME);
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
+        String uuid = exploratoryService.terminate(userInfo, PROJECT, EXPLORATORY_NAME);
+        assertNotNull(uuid);
+        assertEquals(UUID, uuid);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
 
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME, UserInstanceStatus
-						.TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED,
-				UserInstanceStatus.FAILED);
-		verify(requestBuilder).newExploratoryStop(userInfo, userInstance);
-		verify(provisioningService).post(endpointDTO().getUrl() + exploratoryTerminate, TOKEN, eaDto, String.class);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(exploratoryDAO, computationalDAO, requestBuilder, provisioningService, requestId);
-	}
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(computationalDAO).updateComputationalStatusesForExploratory(USER, PROJECT, EXPLORATORY_NAME,
+                UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED,
+                UserInstanceStatus.FAILED);
+        verify(requestBuilder).newExploratoryStop(userInfo, userInstance, endpointDTO());
+        verify(provisioningService).post(endpointDTO().getUrl() + exploratoryTerminate, TOKEN, eaDto, String.class);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(exploratoryDAO, computationalDAO, requestBuilder, provisioningService, requestId);
+    }
 
 	@Test
 	public void terminateWhenMethodFetchExploratoryFieldsThrowsException() {
 		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
 		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 		try {
-			exploratoryService.terminate(userInfo, EXPLORATORY_NAME);
+			exploratoryService.terminate(userInfo, PROJECT, EXPLORATORY_NAME);
 		} catch (DlabException e) {
-			assertEquals("Could not exploratory/terminate exploratory environment expName: Exploratory for user " +
+			assertEquals("Could not terminate exploratory environment expName: Exploratory for user " +
 					"with name not found", e.getMessage());
 		}
 		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
 		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
 		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
 		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
 		verifyNoMoreInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void create() {
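+		// Create is now project-scoped: the service resolves the ProjectDTO and EndpointDTO
+		// first and passes both to the request builder (inferred from the verify calls below).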
+		ProjectDTO projectDTO = getProjectDTO();
+		when(projectService.get(anyString())).thenReturn(projectDTO);
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 		doNothing().when(exploratoryDAO).insertExploratory(any(UserInstanceDTO.class));
 		ExploratoryGitCredsDTO egcDto = new ExploratoryGitCredsDTO();
 		when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDto);
 
 		ExploratoryCreateDTO ecDto = new ExploratoryCreateDTO();
-		Exploratory exploratory = Exploratory.builder().name(EXPLORATORY_NAME).build();
-		when(requestBuilder.newExploratoryCreate(any(Exploratory.class), any(UserInfo.class),
-				any(ExploratoryGitCredsDTO.class), anyMapOf(String.class, String.class))).thenReturn(ecDto);
+		Exploratory exploratory = Exploratory.builder().name(EXPLORATORY_NAME).endpoint("test").build();
+		when(requestBuilder.newExploratoryCreate(any(ProjectDTO.class), any(EndpointDTO.class),
+				any(Exploratory.class), any(UserInfo.class), any(ExploratoryGitCredsDTO.class), anyMapOf(String.class, String.class))).thenReturn(ecDto);
 		String exploratoryCreate = "exploratory/create";
 		when(provisioningService.post(anyString(), anyString(), any(ExploratoryCreateDTO.class), any()))
 				.thenReturn(UUID);
@@ -277,16 +310,18 @@
 
 		userInstance.withStatus("creating");
 		userInstance.withResources(Collections.emptyList());
+		verify(projectService).get("project");
 		verify(exploratoryDAO).insertExploratory(userInstance);
 		verify(gitCredsDAO).findGitCreds(USER);
-		verify(requestBuilder).newExploratoryCreate(exploratory, userInfo, egcDto, Collections.emptyMap());
+		verify(requestBuilder).newExploratoryCreate(projectDTO, endpointDTO(), exploratory, userInfo, egcDto, Collections.emptyMap());
 		verify(provisioningService).post(endpointDTO().getUrl() + exploratoryCreate, TOKEN, ecDto, String.class);
 		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(exploratoryDAO, gitCredsDAO, requestBuilder, provisioningService, requestId);
+		verifyNoMoreInteractions(projectService, exploratoryDAO, gitCredsDAO, requestBuilder, provisioningService, requestId);
 	}
 
 	@Test
 	public void createWhenMethodInsertExploratoryThrowsException() {
+		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 		doThrow(new RuntimeException("Exploratory for user with name not found"))
 				.when(exploratoryDAO).insertExploratory(any(UserInstanceDTO.class));
 		expectedException.expect(DlabException.class);
@@ -295,12 +330,14 @@
 
 		Exploratory exploratory = Exploratory.builder().name(EXPLORATORY_NAME).build();
 		exploratoryService.create(userInfo, exploratory, "project");
+		verify(endpointService).get(anyString());
 	}
 
 	@Test
 	public void createWhenMethodInsertExploratoryThrowsExceptionWithItsCatching() {
+		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 		doThrow(new RuntimeException()).when(exploratoryDAO).insertExploratory(any(UserInstanceDTO.class));
-		Exploratory exploratory = Exploratory.builder().name(EXPLORATORY_NAME).build();
+		Exploratory exploratory = Exploratory.builder().name(EXPLORATORY_NAME).endpoint("test").build();
 		try {
 			exploratoryService.create(userInfo, exploratory, "project");
 		} catch (DlabException e) {
@@ -311,21 +348,24 @@
 		userInstance.withResources(Collections.emptyList());
 		verify(exploratoryDAO).insertExploratory(userInstance);
 		verify(exploratoryDAO, never()).updateExploratoryStatus(any(StatusEnvBaseDTO.class));
+		verify(endpointService).get("test");
 		verifyNoMoreInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void createWhenMethodNewExploratoryCreateThrowsException() {
+		ProjectDTO projectDTO = getProjectDTO();
+		when(projectService.get(anyString())).thenReturn(projectDTO);
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 		doNothing().when(exploratoryDAO).insertExploratory(any(UserInstanceDTO.class));
 		ExploratoryGitCredsDTO egcDto = new ExploratoryGitCredsDTO();
 		when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDto);
 
-		Exploratory exploratory = Exploratory.builder().name(EXPLORATORY_NAME).build();
+		Exploratory exploratory = Exploratory.builder().name(EXPLORATORY_NAME).endpoint("test").build();
 
 		doThrow(new DlabException("Cannot create instance of resource class ")).when(requestBuilder)
-				.newExploratoryCreate(any(Exploratory.class), any(UserInfo.class), any(ExploratoryGitCredsDTO.class),
-						anyMapOf(String.class, String.class));
+				.newExploratoryCreate(any(ProjectDTO.class), any(EndpointDTO.class), any(Exploratory.class),
+						any(UserInfo.class), any(ExploratoryGitCredsDTO.class), anyMapOf(String.class, String.class));
 
 		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
 		try {
@@ -339,189 +379,126 @@
 
 		userInstance.withStatus("creating");
 		userInstance.withResources(Collections.emptyList());
+		verify(projectService).get("project");
 		verify(exploratoryDAO).insertExploratory(userInstance);
 		verify(gitCredsDAO).findGitCreds(USER);
-		verify(requestBuilder).newExploratoryCreate(exploratory, userInfo, egcDto, Collections.emptyMap());
+		verify(requestBuilder).newExploratoryCreate(projectDTO, endpointDTO(), exploratory, userInfo, egcDto, Collections.emptyMap());
 		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO, gitCredsDAO, requestBuilder);
+		verifyNoMoreInteractions(projectService, exploratoryDAO, gitCredsDAO, requestBuilder);
 	}
 
 	@Test
-	public void updateExploratoryStatusesWithRunningStatus() {
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+	public void updateProjectExploratoryStatuses() {
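+		// Replaces the per-user updateExploratoryStatuses tests above: statuses are now
+		// updated per project/endpoint pair and cascaded to the computational resources.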
+		when(exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(anyString(), anyString(), anyVararg()))
+				.thenReturn(singletonList(userInstance));
+		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+		doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
+				anyString(), any(UserInstanceStatus.class), any(UserInstanceStatus.class), anyVararg());
 
-		exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.RUNNING);
+		exploratoryService.updateProjectExploratoryStatuses("project", "endpoint",
+				UserInstanceStatus.TERMINATED);
+		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminated");
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("running");
+		verify(exploratoryDAO).fetchProjectExploratoriesWhereStatusNotIn("project", "endpoint",
+				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
+		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, PROJECT,
+				EXPLORATORY_NAME, UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATED,
+				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
 
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
-
-	@Test
-	public void updateExploratoryStatusesWithStoppingStatus() {
-		userInstance.setStatus("stopping");
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
-				any(UserInstanceStatus.class), any(UserInstanceStatus.class));
-
-		exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.STOPPING);
-
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME,
-				UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING, UserInstanceStatus.FAILED,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
-		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
-	}
-
-	@Test
-	public void updateExploratoryStatusesWithTerminatingStatus() {
-		userInstance.setStatus("terminating");
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class)))
-				.thenReturn(10);
-
-		exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.TERMINATING);
-
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME, UserInstanceStatus
-				.TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED, UserInstanceStatus
-				.FAILED);
-		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
-	}
-
-	@Test
-	public void updateUserExploratoriesReuploadKeyFlag() {
-		doNothing().when(exploratoryDAO).updateReuploadKeyForExploratories(anyString(), anyBoolean(),
-				any(UserInstanceStatus.class));
-
-		exploratoryService.updateExploratoriesReuploadKeyFlag(USER, true, UserInstanceStatus.RUNNING);
-
-		verify(exploratoryDAO).updateReuploadKeyForExploratories(USER, true, UserInstanceStatus.RUNNING);
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
-
-	@Test
-	public void getInstancesWithStatuses() {
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), anyBoolean(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		exploratoryService.getInstancesWithStatuses(USER, UserInstanceStatus.RUNNING, UserInstanceStatus.RUNNING);
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, true, UserInstanceStatus.RUNNING);
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
+	}
 
 	@Test
 	public void getUserInstance() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
 		Optional<UserInstanceDTO> expectedInstance = Optional.of(userInstance);
-		Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, EXPLORATORY_NAME);
+		Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, PROJECT, EXPLORATORY_NAME);
 		assertEquals(expectedInstance, actualInstance);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void getUserInstanceWithException() {
 		doThrow(new ResourceNotFoundException("Exploratory for user not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 
 		Optional<UserInstanceDTO> expectedInstance = Optional.empty();
-		Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, EXPLORATORY_NAME);
+		Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, PROJECT, EXPLORATORY_NAME);
 		assertEquals(expectedInstance, actualInstance);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void testUpdateExploratoryClusterConfig() {
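+		// The reconfigure flow is now project-aware: the DAO lookup, the request builder and
+		// the persisted ExploratoryStatusDTO all carry PROJECT alongside the exploratory name.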
-
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
 		when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
-				anyListOf(ClusterConfig.class))).thenReturn(new ExploratoryReconfigureSparkClusterActionDTO());
+				anyListOf(ClusterConfig.class), any(EndpointDTO.class))).thenReturn(new ExploratoryReconfigureSparkClusterActionDTO());
 		when(provisioningService.post(anyString(), anyString(), any(ExploratoryReconfigureSparkClusterActionDTO.class)
 				, any())).thenReturn(UUID);
 
-		exploratoryService.updateClusterConfig(getUserInfo(), EXPLORATORY_NAME, singletonList(new ClusterConfig()));
+		exploratoryService.updateClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME, singletonList(new ClusterConfig()));
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(getUserInstanceDto()),
-				refEq(singletonList(new ClusterConfig())));
+				refEq(singletonList(new ClusterConfig())), refEq(endpointDTO()));
 		verify(requestId).put(USER, UUID);
 		verify(provisioningService).post(eq(endpointDTO().getUrl() + "exploratory/reconfigure_spark"), eq(TOKEN),
 				refEq(new ExploratoryReconfigureSparkClusterActionDTO(), "self"), eq(String.class));
 		verify(exploratoryDAO).updateExploratoryFields(refEq(new ExploratoryStatusDTO()
 				.withUser(USER)
+				.withProject(PROJECT)
 				.withConfig(singletonList(new ClusterConfig()))
 				.withStatus(UserInstanceStatus.RECONFIGURING.toString())
 				.withExploratoryName(EXPLORATORY_NAME), "self"));
 		verifyNoMoreInteractions(requestBuilder, requestId, exploratoryDAO, provisioningService);
 	}
 
 	@Test
 	public void testUpdateExploratoryClusterConfigWhenNotRunning() {
 
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenThrow(new ResourceNotFoundException("EXCEPTION"));
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenThrow(new ResourceNotFoundException("EXCEPTION"));
 
 		try {
 
-			exploratoryService.updateClusterConfig(getUserInfo(), EXPLORATORY_NAME,
+			exploratoryService.updateClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
 					singletonList(new ClusterConfig()));
 		} catch (ResourceNotFoundException e) {
 			assertEquals("EXCEPTION", e.getMessage());
 		}
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(requestBuilder, requestId, provisioningService);
-
 	}
 
 	@Test
 	public void testGetClusterConfig() {
-
-		when(exploratoryDAO.getClusterConfig(anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
-		final List<ClusterConfig> clusterConfig = exploratoryService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME);
+		when(exploratoryDAO.getClusterConfig(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
+		final List<ClusterConfig> clusterConfig = exploratoryService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME);
 
 		assertEquals(1, clusterConfig.size());
 		assertEquals("classification", clusterConfig.get(0).getClassification());
 
-		verify(exploratoryDAO).getClusterConfig(getUserInfo().getName(), EXPLORATORY_NAME);
+		verify(exploratoryDAO).getClusterConfig(getUserInfo().getName(), PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void testGetClusterConfigWithException() {
-
-		when(exploratoryDAO.getClusterConfig(anyString(), anyString())).thenThrow(new RuntimeException("Exception"));
+		when(exploratoryDAO.getClusterConfig(anyString(), anyString(), anyString())).thenThrow(new RuntimeException("Exception"));
 
 		expectedException.expect(RuntimeException.class);
 		expectedException.expectMessage("Exception");
-		exploratoryService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME);
+		exploratoryService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME);
 	}
 
 	private ClusterConfig getClusterConfig() {
 		final ClusterConfig config = new ClusterConfig();
@@ -534,26 +511,37 @@
 	}
 
 	private UserInstanceDTO getUserInstanceDto() {
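+		// Fixture now pins project, endpoint and cloud provider, presumably so that the
+		// refEq() comparisons in the project-aware tests above match the new fields too.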
 		UserComputationalResource compResource = new UserComputationalResource();
 		compResource.setImageName("YYYY.dataengine");
 		compResource.setComputationalName("compName");
 		compResource.setStatus("stopped");
 		compResource.setComputationalId("compId");
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME).withStatus("running")
-				.withResources(singletonList(compResource))
-				.withTags(Collections.emptyMap())
-				.withProject("project");
-	}
+		return new UserInstanceDTO()
+				.withUser(USER)
+				.withExploratoryName(EXPLORATORY_NAME)
+				.withStatus("running")
+				.withResources(singletonList(compResource))
+				.withTags(Collections.emptyMap())
+				.withProject(PROJECT)
+				.withEndpoint("test")
+				.withCloudProvider(CloudProvider.AWS.toString());
+	}
 
 	private StatusEnvBaseDTO getStatusEnvBaseDTOWithStatus(String status) {
 		return new ExploratoryStatusDTO()
+				.withProject(PROJECT)
 				.withUser(USER)
 				.withExploratoryName(EXPLORATORY_NAME)
 				.withStatus(status);
 	}
 
 	private EndpointDTO endpointDTO() {
-		return new EndpointDTO("test", "url", "", null);
+		return new EndpointDTO("test", "url", "", null, EndpointDTO.EndpointStatus.ACTIVE, CloudProvider.AWS);
 	}
 
+	private ProjectDTO getProjectDTO() {
+		return new ProjectDTO("project", Collections.emptySet(), "", "", null,
+				singletonList(new ProjectEndpointDTO(ENDPOINT_NAME, UserInstanceStatus.RUNNING,
+						new EdgeInfo())), true);
+	}
 }
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImplTest.java
index f6191a1..8b8acaa 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/GitCredentialServiceImplTest.java
@@ -26,6 +26,7 @@
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.exploratory.ExploratoryGitCredsDTO;
 import com.epam.dlab.dto.exploratory.ExploratoryGitCredsUpdateDTO;
@@ -76,7 +77,7 @@
 		when(exploratoryDAO.fetchRunningExploratoryFields(anyString())).thenReturn(Collections.singletonList(uiDto));
 
 		ExploratoryGitCredsUpdateDTO egcuDto = new ExploratoryGitCredsUpdateDTO().withExploratoryName(exploratoryName);
-		when(requestBuilder.newGitCredentialsUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
+		when(requestBuilder.newGitCredentialsUpdate(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
 				any(ExploratoryGitCredsDTO.class))).thenReturn(egcuDto);
 
 		String uuid = "someUuid";
@@ -89,7 +90,7 @@
 
 		verify(gitCredsDAO).updateGitCreds(USER, egcDto);
 		verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
-		verify(requestBuilder).newGitCredentialsUpdate(userInfo, uiDto, egcDto);
+		verify(requestBuilder).newGitCredentialsUpdate(userInfo, uiDto, endpointDTO(), egcDto);
 		verify(provisioningService).post(endpointDTO().getUrl() + "exploratory/git_creds", token, egcuDto,
 				String.class);
 		verify(requestId).put(USER, uuid);
@@ -116,6 +117,7 @@
 
 	@Test
 	public void updateGitCredentialsWithFailedNotebooks() {
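+		// The git-creds flow now looks up the endpoint; without this stub the request
+		// builder would be handed a null EndpointDTO by the unstubbed mock.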
+		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 		String token = "token";
 		UserInfo userInfo = new UserInfo(USER, token);
 		doNothing().when(gitCredsDAO).updateGitCreds(anyString(), any(ExploratoryGitCredsDTO.class));
@@ -126,7 +128,7 @@
 
 		doThrow(new DlabException("Cannot create instance of resource class "))
 				.when(requestBuilder).newGitCredentialsUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(ExploratoryGitCredsDTO.class));
+				any(EndpointDTO.class), any(ExploratoryGitCredsDTO.class));
 
 		ExploratoryGitCredsDTO egcDto = new ExploratoryGitCredsDTO();
 		try {
@@ -138,7 +140,7 @@
 
 		verify(gitCredsDAO).updateGitCreds(USER, egcDto);
 		verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
-		verify(requestBuilder).newGitCredentialsUpdate(userInfo, uiDto, egcDto);
+		verify(requestBuilder).newGitCredentialsUpdate(userInfo, uiDto, endpointDTO(), egcDto);
 		verifyNoMoreInteractions(gitCredsDAO, exploratoryDAO, requestBuilder);
 	}
 
@@ -168,6 +170,6 @@
 	}
 
 	private EndpointDTO endpointDTO() {
-		return new EndpointDTO("test", "url", "", null);
+		return new EndpointDTO("test", "url", "", null, EndpointDTO.EndpointStatus.ACTIVE, CloudProvider.AWS);
 	}
 }
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
index 39ff670..e15044b 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
@@ -24,9 +24,12 @@
 import com.epam.dlab.backendapi.dao.ExploratoryLibDAO;
 import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
 import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.exploratory.ExploratoryImageDTO;
 import com.epam.dlab.dto.exploratory.ExploratoryStatusDTO;
@@ -55,7 +58,16 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.anyVararg;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class ImageExploratoryServiceImplTest {
@@ -81,6 +93,8 @@
 	private RequestBuilder requestBuilder;
 	@Mock
 	private EndpointService endpointService;
+	@Mock
+	private ProjectService projectService;
 
 	@InjectMocks
 	private ImageExploratoryServiceImpl imageExploratoryService;
@@ -97,95 +111,99 @@
 
 	@Test
 	public void createImage() {
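+		// Image creation is now resolved against both project and endpoint, so the test
+		// stubs projectService and endpointService alongside the usual DAO lookups.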
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(projectService.get(anyString())).thenReturn(getProjectDTO());
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(false);
 
-		when(libDAO.getLibraries(anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
+		when(libDAO.getLibraries(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
 		doNothing().when(imageExploratoryDao).save(any(Image.class));
 		when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
 				.thenReturn(mock(UpdateResult.class));
 		ExploratoryImageDTO eiDto = new ExploratoryImageDTO();
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(requestBuilder.newExploratoryImageCreate(any(UserInfo.class), any(UserInstanceDTO.class), anyString()))
-				.thenReturn(eiDto);
+		when(requestBuilder.newExploratoryImageCreate(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
+				any(EndpointDTO.class), any(ProjectDTO.class))).thenReturn(eiDto);
 
 		String expectedUuid = "someUuid";
 		when(provisioningService.post(anyString(), anyString(), any(ExploratoryImageDTO.class), any()))
 				.thenReturn(expectedUuid);
 
 		String imageName = "someImageName", imageDescription = "someDescription";
-		String actualUuid = imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName,
-				imageDescription);
+		String actualUuid = imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME,
+				imageName, imageDescription);
 		assertNotNull(actualUuid);
 		assertEquals(expectedUuid, actualUuid);
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(projectService).get(PROJECT);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
 		verify(imageExploratoryDao).exist(imageName, PROJECT);
 		verify(imageExploratoryDao).save(any(Image.class));
-		verify(libDAO).getLibraries(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName);
+		verify(libDAO).getLibraries(USER, PROJECT, EXPLORATORY_NAME);
+		verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO(), getProjectDTO());
 		verify(endpointService).get(anyString());
 		verify(provisioningService).post(endpointDTO().getUrl() + "exploratory/image", TOKEN, eiDto, String.class);
-		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService, provisioningService);
+		verifyNoMoreInteractions(projectService, exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService, provisioningService);
 	}
 
 	@Test
 	public void createImageWhenMethodFetchRunningExploratoryFieldsThrowsException() {
 		doThrow(new DlabException("Running exploratory instance for user with name not found."))
-				.when(exploratoryDAO).fetchRunningExploratoryFields(anyString(), anyString());
+				.when(exploratoryDAO).fetchRunningExploratoryFields(anyString(), anyString(), anyString());
 
 		String imageName = "someImageName", imageDescription = "someDescription";
 
 		try {
-			imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+			imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
 		} catch (DlabException e) {
 			assertEquals("Running exploratory instance for user with name not found.", e.getMessage());
 		}
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void createImageWhenResourceAlreadyExists() {
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(true);
 
 		expectedException.expect(ResourceAlreadyExistException.class);
 		expectedException.expectMessage("Image with name someImageName is already exist");
 
 		String imageName = "someImageName", imageDescription = "someDescription";
-		imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+		imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
 	}
 
 	@Test
 	public void createImageWhenMethodNewExploratoryImageCreateThrowsException() {
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(projectService.get(anyString())).thenReturn(getProjectDTO());
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(false);
 
-		when(libDAO.getLibraries(anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
+		when(libDAO.getLibraries(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
 		doNothing().when(imageExploratoryDao).save(any(Image.class));
 		when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
 				.thenReturn(mock(UpdateResult.class));
 		doThrow(new DlabException("Cannot create instance of resource class")).when(requestBuilder)
-				.newExploratoryImageCreate(any(UserInfo.class), any(UserInstanceDTO.class), anyString());
+				.newExploratoryImageCreate(any(UserInfo.class), any(UserInstanceDTO.class), anyString(), any(EndpointDTO.class), any(ProjectDTO.class));
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 
 		String imageName = "someImageName", imageDescription = "someDescription";
 		try {
-			imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+			imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
 		} catch (DlabException e) {
 			assertEquals("Cannot create instance of resource class", e.getMessage());
 		}
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(projectService).get(PROJECT);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
 		verify(imageExploratoryDao).exist(imageName, PROJECT);
 		verify(imageExploratoryDao).save(any(Image.class));
-		verify(libDAO).getLibraries(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName);
+		verify(libDAO).getLibraries(USER, PROJECT, EXPLORATORY_NAME);
+		verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO(), getProjectDTO());
 		verify(endpointService).get(anyString());
-		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService);
+		verifyNoMoreInteractions(projectService, exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService);
 	}
 
 	@Test
@@ -193,13 +211,13 @@
 		when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
 				.thenReturn(mock(UpdateResult.class));
 		doNothing().when(imageExploratoryDao).updateImageFields(any(Image.class));
-		doNothing().when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString());
+		doNothing().when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString(), anyString());
 
 		String notebookIp = "someIp";
 		imageExploratoryService.finishImageCreate(image, EXPLORATORY_NAME, notebookIp);
 
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
-		verify(exploratoryDAO).updateExploratoryIp(USER, notebookIp, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateExploratoryIp(USER, PROJECT, notebookIp, EXPLORATORY_NAME);
 		verify(imageExploratoryDao).updateImageFields(image);
 		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
 	}
@@ -210,7 +228,7 @@
 				.thenReturn(mock(UpdateResult.class));
 		doNothing().when(imageExploratoryDao).updateImageFields(any(Image.class));
 		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString());
+				.when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString(), anyString());
 
 		String notebookIp = "someIp";
 		try {
@@ -220,7 +238,7 @@
 		}
 
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
-		verify(exploratoryDAO).updateExploratoryIp(USER, notebookIp, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateExploratoryIp(USER, PROJECT, notebookIp, EXPLORATORY_NAME);
 		verify(imageExploratoryDao).updateImageFields(image);
 		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
 	}
@@ -234,7 +252,7 @@
 		imageExploratoryService.finishImageCreate(image, EXPLORATORY_NAME, null);
 
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
-		verify(exploratoryDAO, never()).updateExploratoryIp(USER, null, EXPLORATORY_NAME);
+		verify(exploratoryDAO, never()).updateExploratoryIp(USER, PROJECT, null, EXPLORATORY_NAME);
 		verify(imageExploratoryDao).updateImageFields(image);
 		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
 	}
@@ -259,23 +277,24 @@
 	@Test
 	public void getImage() {
 		ImageInfoRecord expectedImageInfoRecord = getImageInfoRecord();
-		when(imageExploratoryDao.getImage(anyString(), anyString())).thenReturn(Optional.of(expectedImageInfoRecord));
+		when(imageExploratoryDao.getImage(anyString(), anyString(), anyString(), anyString()))
+				.thenReturn(Optional.of(expectedImageInfoRecord));
 
-		ImageInfoRecord actualImageInfoRecord = imageExploratoryService.getImage(USER, "someName");
+		ImageInfoRecord actualImageInfoRecord = imageExploratoryService.getImage(USER, "someName", "someProject", "someEndpoint");
 		assertNotNull(actualImageInfoRecord);
 		assertEquals(expectedImageInfoRecord, actualImageInfoRecord);
 
-		verify(imageExploratoryDao).getImage(USER, "someName");
+		verify(imageExploratoryDao).getImage(USER, "someName", "someProject", "someEndpoint");
 		verifyNoMoreInteractions(imageExploratoryDao);
 	}
 
 	@Test
 	public void getImageWhenMethodGetImageReturnsOptionalEmpty() {
-		when(imageExploratoryDao.getImage(anyString(), anyString())).thenReturn(Optional.empty());
+		when(imageExploratoryDao.getImage(anyString(), anyString(), anyString(), anyString())).thenReturn(Optional.empty());
 		expectedException.expect(ResourceNotFoundException.class);
 		expectedException.expectMessage(String.format("Image with name %s was not found for user %s",
 				"someImageName", USER));
-		imageExploratoryService.getImage(USER, "someImageName");
+		imageExploratoryService.getImage(USER, "someImageName", "someProject", "someEndpoint");
 	}
 
 	@Test
@@ -289,7 +308,7 @@
 	}
 
 	private ImageInfoRecord getImageInfoRecord() {
-		return new ImageInfoRecord("someName", "someDescription", "someProject", "someEndpoint", "someApp",
+		return new ImageInfoRecord("someName", "someDescription", "someProject", "someEndpoint", "someUser", "someApp",
 				"someFullName", ImageStatus.CREATED);
 	}
 
@@ -299,6 +318,7 @@
 				.description("someDescription")
 				.status(ImageStatus.CREATING)
 				.user(USER)
+				.project(PROJECT)
 				.libraries(Collections.singletonList(getLibrary()))
 				.computationalLibraries(Collections.emptyMap())
 				.dockerImage("someImageName")
@@ -311,8 +331,11 @@
 	}
 
 	private UserInstanceDTO getUserInstanceDto() {
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
-				.withExploratoryId("explId").withProject(PROJECT);
+		return new UserInstanceDTO()
+				.withUser(USER)
+				.withExploratoryName(EXPLORATORY_NAME)
+				.withExploratoryId("explId")
+				.withProject(PROJECT);
 	}
 
 	private UserInfo getUserInfo() {
@@ -320,6 +343,10 @@
 	}
 
 	private EndpointDTO endpointDTO() {
-		return new EndpointDTO("test", "url", "", null);
+		return new EndpointDTO("test", "url", "", null, EndpointDTO.EndpointStatus.ACTIVE, CloudProvider.AWS);
+	}
+
+	private ProjectDTO getProjectDTO() {
+		return ProjectDTO.builder().name(PROJECT).build();
 	}
 }
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceBaseTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceBaseTest.java
index fdbacab..56c838a 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceBaseTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/InfrastructureTemplateServiceBaseTest.java
@@ -20,11 +20,14 @@
 package com.epam.dlab.backendapi.service.impl;
 
 import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.dao.SettingsDAO;
+import com.epam.dlab.backendapi.dao.UserGroupDao;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
 import com.epam.dlab.dto.imagemetadata.ComputationalMetadataDTO;
 import com.epam.dlab.dto.imagemetadata.ComputationalResourceShapeDto;
@@ -55,6 +58,10 @@
 	private ProjectDAO projectDAO;
 	@Mock
 	private EndpointService endpointService;
+	@Mock
+	private UserGroupDao userGroupDao;
+	@Mock
+	private SelfServiceApplicationConfiguration configuration;
 
 	@InjectMocks
 	private InfrastructureTemplateServiceBaseChild infrastructureTemplateServiceBaseChild =
@@ -81,9 +88,8 @@
 						"someRam2", 6)));
 		emDto2.setExploratoryEnvironmentShapes(shapes2);
 		List<ExploratoryMetadataDTO> expectedEmdDtoList = Arrays.asList(emDto1, emDto2);
-		when(projectDAO.get(anyString())).thenReturn(Optional.of(new ProjectDTO("project", Collections.emptySet(),
-				null, null, null, null)));
-		when(provisioningService.get(anyString(), anyString(), any())).thenReturn(expectedEmdDtoList.toArray());
+		when(userGroupDao.getUserGroups(anyString())).thenReturn(Collections.emptySet());
+		when(provisioningService.get(anyString(), anyString(), any(Class.class))).thenReturn(expectedEmdDtoList.toArray());
 		when(settingsDAO.getConfOsFamily()).thenReturn("someConfOsFamily");
 
 		UserInfo userInfo = new UserInfo("test", "token");
@@ -94,14 +100,15 @@
 
 		verify(provisioningService).get(endpointDTO().getUrl() + "docker/exploratory", "token", ExploratoryMetadataDTO[].class);
 		verify(settingsDAO, times(2)).getConfOsFamily();
-		verifyNoMoreInteractions(provisioningService, settingsDAO);
+		verify(userGroupDao).getUserGroups("test");
+		verifyNoMoreInteractions(provisioningService, settingsDAO, userGroupDao);
 	}
 
 	@Test
 	public void getExploratoryTemplatesWithException() {
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 		doThrow(new DlabException("Could not load list of exploratory templates for user"))
-				.when(provisioningService).get(anyString(), anyString(), any());
+				.when(provisioningService).get(anyString(), anyString(), any(Class.class));
 
 		UserInfo userInfo = new UserInfo("test", "token");
 		try {
@@ -123,8 +130,8 @@
 				computationalMetadataDTO
 		);
 		when(projectDAO.get(anyString())).thenReturn(Optional.of(new ProjectDTO("project", Collections.emptySet(),
-				null, null, null, null)));
-		when(provisioningService.get(anyString(), anyString(), any())).thenReturn(expectedCmdDtoList.toArray(new ComputationalMetadataDTO[]{}));
+				null, null, null, null, true)));
+		when(provisioningService.get(anyString(), anyString(), any(Class.class))).thenReturn(expectedCmdDtoList.toArray(new ComputationalMetadataDTO[]{}));
 
 		List<FullComputationalTemplate> expectedFullCmdDtoList = expectedCmdDtoList.stream()
 				.map(e -> infrastructureTemplateServiceBaseChild.getCloudFullComputationalTemplate(e))
@@ -147,7 +154,7 @@
 	public void getComputationalTemplatesWhenMethodThrowsException() {
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
 		doThrow(new DlabException("Could not load list of computational templates for user"))
-				.when(provisioningService).get(anyString(), anyString(), any());
+				.when(provisioningService).get(anyString(), anyString(), any(Class.class));
 
 		UserInfo userInfo = new UserInfo("test", "token");
 		try {
@@ -166,9 +173,13 @@
 		final ComputationalMetadataDTO computationalMetadataDTO = new ComputationalMetadataDTO("dataengine-service");
 		computationalMetadataDTO.setComputationResourceShapes(Collections.emptyMap());
 		List<ComputationalMetadataDTO> expectedCmdDtoList = Collections.singletonList(computationalMetadataDTO);
-		when(provisioningService.get(anyString(), anyString(), any())).thenReturn(expectedCmdDtoList.toArray(new ComputationalMetadataDTO[]{}));
+		when(provisioningService.get(anyString(), anyString(), any(Class.class))).thenReturn(expectedCmdDtoList.toArray(new ComputationalMetadataDTO[]{}));
 		when(projectDAO.get(anyString())).thenReturn(Optional.of(new ProjectDTO("project", Collections.emptySet(),
-				null, null, null, null)));
+				null, null, null, null, true)));
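+		// The computational templates now appear to read EMR instance-count and spot-bid
+		// bounds from the application configuration, so the mock returns explicit limits.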
+		when(configuration.getMinEmrInstanceCount()).thenReturn(1);
+		when(configuration.getMaxEmrInstanceCount()).thenReturn(2);
+		when(configuration.getMaxEmrSpotInstanceBidPct()).thenReturn(3);
+		when(configuration.getMinEmrSpotInstanceBidPct()).thenReturn(4);
 
 		UserInfo userInfo = new UserInfo("test", "token");
 		try {
@@ -185,19 +196,19 @@
 			IllegalAccessException {
 		Field computationalMetadataDTO1 = object1.getClass().getDeclaredField("computationalMetadataDTO");
 		computationalMetadataDTO1.setAccessible(true);
-		Field computationalMetadataDTO2 = object2.getClass().getDeclaredField("computationalMetadataDTO");
+		Field computationalMetadataDTO2 = object2.getClass().getSuperclass().getDeclaredField("computationalMetadataDTO");
 		computationalMetadataDTO2.setAccessible(true);
 		return computationalMetadataDTO1.get(object1).equals(computationalMetadataDTO2.get(object2));
 	}
 
-	private class InfrastructureTemplateServiceBaseChild extends InfrastructureTemplateServiceBase {
-		@Override
+	private EndpointDTO endpointDTO() {
+		return new EndpointDTO("test", "url", "", null, EndpointDTO.EndpointStatus.ACTIVE, CloudProvider.AWS);
+	}
+
+	private class InfrastructureTemplateServiceBaseChild extends InfrastructureTemplateServiceImpl {
+
 		protected FullComputationalTemplate getCloudFullComputationalTemplate(ComputationalMetadataDTO metadataDTO) {
 			return new FullComputationalTemplate(metadataDTO);
 		}
 	}
-
-	private EndpointDTO endpointDTO() {
-		return new EndpointDTO("test", "url", "", null);
-	}
 }
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
index 4a076f8..3677929 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
@@ -30,6 +30,7 @@
 import com.epam.dlab.backendapi.resources.dto.LibraryStatus;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
+import com.epam.dlab.cloud.CloudProvider;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.computational.UserComputationalResource;
 import com.epam.dlab.dto.exploratory.LibInstallDTO;
@@ -55,38 +56,46 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class LibraryServiceImplTest {
 
 	private static final String LIB_NAME = "name";
 	private static final String LIB_GROUP = "group";
 	private static final String LIB_VERSION = "version";
 	private static final String UUID = "id";
 	private final String USER = "test";
 	private final String EXPLORATORY_NAME = "explName";
+	private final String PROJECT = "projectName";
 	private final String COMPUTATIONAL_NAME = "compName";
 
 	private LibInstallDTO liDto;
 	private List<LibInstallDTO> libs;
 	private LibInstallFormDTO libInstallFormDTO;
 	private LibraryInstallDTO libraryInstallDto;
 
 	@Mock
 	private ExploratoryDAO exploratoryDAO;
 	@Mock
 	private ExploratoryLibDAO libraryDAO;
 	@Mock
 	private RequestBuilder requestBuilder;
 	@Mock
 	private RequestId requestId;
 	@Mock
 	private RESTService provisioningService;
 	@Mock
 	private EndpointService endpointService;
 
 	@Rule
 	public ExpectedException expectedException = ExpectedException.none();
 
 	@InjectMocks
@@ -99,241 +108,245 @@
 
 	@Test
 	public void testGetLibs() {
 		Document document = new Document();
-		when(libraryDAO.findExploratoryLibraries(anyString(), anyString())).thenReturn(document);
+		when(libraryDAO.findExploratoryLibraries(anyString(), anyString(), anyString())).thenReturn(document);
 
 		List<Document> expectedList = new ArrayList<>();
-		List<Document> actualList = libraryService.getLibs(USER, EXPLORATORY_NAME, "");
+		List<Document> actualList = libraryService.getLibs(USER, PROJECT, EXPLORATORY_NAME, "");
 		assertNotNull(actualList);
 		assertEquals(expectedList, actualList);
 
-		verify(libraryDAO).findExploratoryLibraries(USER, EXPLORATORY_NAME);
+		verify(libraryDAO).findExploratoryLibraries(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(libraryDAO);
 	}
 
 	@Test
 	public void getLibInfo() {
 		Document document = new Document();
-		when(libraryDAO.findAllLibraries(anyString(), anyString())).thenReturn(document);
+		when(libraryDAO.findAllLibraries(anyString(), anyString(), anyString())).thenReturn(document);
 
 		List<LibInfoRecord> expectedList = new ArrayList<>();
-		List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, EXPLORATORY_NAME);
+		List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, PROJECT, EXPLORATORY_NAME);
 		assertNotNull(actualList);
 		assertEquals(expectedList, actualList);
 
-		verify(libraryDAO).findAllLibraries(USER, EXPLORATORY_NAME);
+		verify(libraryDAO).findAllLibraries(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(libraryDAO);
 	}
 
 	@Test
 	public void getLibInfoWhenListsOfExploratoryAndComputationalLibsAreNotEmpty() {
-		when(libraryDAO.findAllLibraries(anyString(), anyString()))
+		when(libraryDAO.findAllLibraries(anyString(), anyString(), anyString()))
 				.thenReturn(getDocumentWithExploratoryAndComputationalLibs());
 
 		List<LibInfoRecord> expectedList = getLibInfoRecordList();
-		List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, EXPLORATORY_NAME);
+		List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, PROJECT, EXPLORATORY_NAME);
 		assertNotNull(actualList);
 		assertEquals(expectedList, actualList);
 
-		verify(libraryDAO).findAllLibraries(USER, EXPLORATORY_NAME);
+		verify(libraryDAO).findAllLibraries(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(libraryDAO);
 	}
 
 	@Test
 	public void installComputationalLibsWithoutOverride() {
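+		// Library installs are now keyed by (user, project, exploratory, computational):
+		// the DAO stubs take an extra matcher and the request builder an EndpointDTO.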
 		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
 		final List<LibInstallDTO> libsToInstall = getLibs("installing");
 		libraryInstallDTO.setLibs(libsToInstall);
 		final UserInfo user = getUser();
 
 		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
 		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
 		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class))).thenReturn(libraryInstallDTO);
 
 
-		final String uuid = libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				getLibs(null));
+		final String uuid = libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+				COMPUTATIONAL_NAME, getLibs(null));
 
 		assertEquals(UUID, uuid);
 
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+		verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
 		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
-				refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall));
+				refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
 		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"), eq(user.getAccessToken()),
 				refEq(libraryInstallDTO), eq(String.class));
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME),
-				refEq(libsToInstall.get(0)), eq(false));
+		verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
+				eq(COMPUTATIONAL_NAME), refEq(libsToInstall.get(0)), eq(false));
 		verify(requestId).put(user.getName(), UUID);
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
 	}
 
 	@Test
 	public void installComputationalLibsWhenComputationalNotFound() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        libraryInstallDTO.setProject(PROJECT);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+                .thenReturn(libraryInstallDTO);
 
 
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Computational with name " + COMPUTATIONAL_NAME + "X was not found");
+        expectedException.expect(DlabException.class);
+        expectedException.expectMessage("Computational with name " + COMPUTATIONAL_NAME + "X was not found");
 
-		libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME + "X",
-				getLibs(null));
-	}
+        libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+                COMPUTATIONAL_NAME + "X", getLibs(null));
+    }
 
 	@Test
 	public void installComputationalLibsWithOverride() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setProject(PROJECT);
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+                .thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
 
-		final String uuid = libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				getLibs(null));
+        final String uuid = libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+                COMPUTATIONAL_NAME, getLibs(null));
 
-		assertEquals(UUID, uuid);
+        assertEquals(UUID, uuid);
 
-		libsToInstall.get(0).setOverride(true);
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME),
-				refEq(libsToInstall.get(0)), eq(true));
-		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
-				refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall));
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"),
-				eq(user.getAccessToken()),
-				refEq(libraryInstallDTO), eq(String.class));
-		verify(requestId).put(user.getName(), UUID);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+        libsToInstall.get(0).setOverride(true);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+        verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
+                eq(COMPUTATIONAL_NAME), refEq(libsToInstall.get(0)), eq(true));
+        verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
+                refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"),
+                eq(user.getAccessToken()),
+                refEq(libraryInstallDTO), eq(String.class));
+        verify(requestId).put(user.getName(), UUID);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
 
-	}
+    }
 
 
 	@Test
 	public void installComputationalLibsWhenLibraryIsAlreadyInstalling() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+                .thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
 
-		try {
-			libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-					getLibs(null));
-		} catch (DlabException e) {
-			assertEquals("Library name is already installing", e.getMessage());
-		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
-
-	}
+        try {
+            libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+                    COMPUTATIONAL_NAME, getLibs(null));
+        } catch (DlabException e) {
+            assertEquals("Library name is already installing", e.getMessage());
+        }
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+    }
 
 	@Test
 	public void installExploratoryLibsWithoutOverride() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
 
 
-		final String uuid = libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
+        final String uuid = libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
 
-		assertEquals(UUID, uuid);
+        assertEquals(UUID, uuid);
 
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
-		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(libsToInstall));
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
-				refEq(libraryInstallDTO), eq(String.class));
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(false));
-		verify(requestId).put(user.getName(), UUID);
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
-	}
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+        verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
+                refEq(libraryInstallDTO), eq(String.class));
+        verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(false));
+        verify(requestId).put(user.getName(), UUID);
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+    }
 
 	@Test
 	public void installExploratoryLibsWithOverride() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
 
-		final String uuid = libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
+        final String uuid = libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
 
-		assertEquals(UUID, uuid);
+        assertEquals(UUID, uuid);
 
-		libsToInstall.get(0).setOverride(true);
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(true));
-		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(libsToInstall));
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
-				refEq(libraryInstallDTO), eq(String.class));
-		verify(requestId).put(USER, uuid);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
-	}
+        libsToInstall.get(0).setOverride(true);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+        verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(true));
+        verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
+                refEq(libraryInstallDTO), eq(String.class));
+        verify(requestId).put(USER, uuid);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+    }
 
 	@Test
 	public void installExploratoryLibsWhenLibIsAlreadyInstalling() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
 
-		try {
-			libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
-		} catch (DlabException e) {
-			assertEquals("Library name is already installing", e.getMessage());
-		}
+        try {
+            libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
+        } catch (DlabException e) {
+            assertEquals("Library name is already installing", e.getMessage());
+        }
 
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
 
-	}
+    }
 
 	private Library getLibrary(LibStatus status) {
 		return new Library(LIB_GROUP, LIB_NAME, "1", status, "");
@@ -420,7 +433,7 @@
 	}
 
 	private EndpointDTO endpointDTO() {
-		return new EndpointDTO("test", "url", "", null);
+		return new EndpointDTO("test", "url", "", null, EndpointDTO.EndpointStatus.ACTIVE, CloudProvider.AWS);
 	}
 
 }
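
Taken together, the LibraryServiceImplTest hunks above track one API migration: every service and DAO call is now scoped by a project name, RequestBuilder#newLibInstall additionally receives the resolved EndpointDTO, and the endpointDTO() fixture grows status and cloud-provider arguments (see the new EndpointDTO constructor just above). A minimal sketch of the new call shape follows; it assumes the LibraryService interface lives under com.epam.dlab.backendapi.service (its import is not shown in this diff), and the "proj", "explName", and "compName" literals are hypothetical placeholders, not values from the patch:

// Illustration only, not part of the patch. Assumes the DLab self-service
// classes exercised by the tests above are on the classpath; package location
// of LibraryService is inferred, and all string values are placeholders.
import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.service.LibraryService;

import java.util.Collections;

public class LibraryCallShapeSketch {

    static String installOnCluster(LibraryService libraryService, UserInfo user) {
        // Before: installComputationalLibs(user, exploratoryName, computationalName, libs)
        // After:  installComputationalLibs(user, project, exploratoryName, computationalName, libs)
        return libraryService.installComputationalLibs(user, "proj", "explName", "compName",
                Collections.emptyList());
    }

    static String installOnNotebook(LibraryService libraryService, UserInfo user) {
        // installExploratoryLibs gains the same leading project parameter.
        return libraryService.installExploratoryLibs(user, "proj", "explName",
                Collections.emptyList());
    }
}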
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
index 6e4bdc1..1aefbbb 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
@@ -22,21 +22,13 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
-import com.epam.dlab.backendapi.dao.KeyDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.ExploratoryService;
-import com.epam.dlab.backendapi.service.UserResourceService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.aws.edge.EdgeInfoAws;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.dto.base.edge.EdgeInfo;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyCallbackDTO;
-import com.epam.dlab.dto.reuploadkey.ReuploadKeyDTO;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatus;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatusDTO;
-import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.model.ResourceType;
 import com.epam.dlab.rest.client.RESTService;
@@ -48,16 +40,18 @@
 import org.junit.runner.RunWith;
 import org.mockito.InjectMocks;
 import org.mockito.Mock;
-import org.mockito.Mockito;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.util.*;
-
-import static com.epam.dlab.dto.UserInstanceStatus.REUPLOADING_KEY;
 import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class ReuploadKeyServiceImplTest {
@@ -69,8 +63,6 @@
 	private UserInfo userInfo;
 
 	@Mock
-	private KeyDAO keyDAO;
-	@Mock
 	private RESTService provisioningService;
 	@Mock
 	private RequestBuilder requestBuilder;
@@ -82,8 +74,6 @@
 	private ComputationalDAO computationalDAO;
 	@Mock
 	private ExploratoryDAO exploratoryDAO;
-	@Mock
-	private UserResourceService userResourceService;
 
 	@InjectMocks
 	private ReuploadKeyServiceImpl reuploadKeyService;
@@ -97,351 +87,94 @@
 		userInfo = getUserInfo();
 	}
 
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKey() {
-		doNothing().when(userResourceService).updateReuploadKeyFlagForUserResources(anyString(), anyBoolean());
-		List<UserInstanceDTO> instances = Collections.singletonList(getUserInstance());
-		when(exploratoryService.getInstancesWithStatuses(anyString(), any(UserInstanceStatus.class),
-				any(UserInstanceStatus.class))).thenReturn(instances);
-		List<ResourceData> resourceList = new ArrayList<>();
-		resourceList.add(new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null));
-		when(userResourceService.convertToResourceData(any(List.class))).thenReturn(resourceList);
-
-		Optional<EdgeInfoAws> edgeInfo = Optional.of(new EdgeInfoAws());
-		Mockito.<Optional<? extends EdgeInfo>>when(keyDAO.getEdgeInfoWhereStatusIn(anyString(), anyVararg()))
-				.thenReturn(edgeInfo);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-
-		doNothing().when(exploratoryDAO).updateStatusForExploratories(any(UserInstanceStatus.class), anyString(),
-				any(UserInstanceStatus.class));
-		doNothing().when(computationalDAO).updateStatusForComputationalResources(any(UserInstanceStatus.class),
-				anyString(), any(List.class), any(List.class), any(UserInstanceStatus.class));
-		ReuploadKeyDTO reuploadFile = mock(ReuploadKeyDTO.class);
-		when(requestBuilder.newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class)))
-				.thenReturn(reuploadFile);
-		String expectedUuid = "someUuid";
-		when(provisioningService.post(anyString(), anyString(), any(ReuploadKeyDTO.class), any()))
-				.thenReturn(expectedUuid);
-
-		String keyContent = "keyContent";
-		String actualUuid = reuploadKeyService.reuploadKey(userInfo, keyContent);
-		assertNotNull(actualUuid);
-		assertEquals(expectedUuid, actualUuid);
-		assertEquals(2, resourceList.size());
-
-		verify(userResourceService).updateReuploadKeyFlagForUserResources(USER, true);
-		verify(exploratoryService).getInstancesWithStatuses(USER, RUNNING, RUNNING);
-		verify(userResourceService).convertToResourceData(instances);
-		verify(keyDAO).getEdgeInfoWhereStatusIn(USER, RUNNING);
-		verify(keyDAO).updateEdgeStatus(USER, "reuploading key");
-		verify(exploratoryDAO).updateStatusForExploratories(REUPLOADING_KEY, USER, RUNNING);
-		verify(computationalDAO).updateStatusForComputationalResources(REUPLOADING_KEY, USER,
-				Arrays.asList(RUNNING, REUPLOADING_KEY), Arrays.asList(DataEngineType.SPARK_STANDALONE,
-						DataEngineType.CLOUD_SERVICE), RUNNING);
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(keyContent), any(List.class));
-		verify(provisioningService).post("/key/reupload", TOKEN, reuploadFile, String.class);
-		verifyNoMoreInteractions(userResourceService, exploratoryService, keyDAO, exploratoryDAO, computationalDAO,
-				requestBuilder, provisioningService);
-		verifyZeroInteractions(requestId);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKeyWithoutEdge() {
-		doNothing().when(userResourceService).updateReuploadKeyFlagForUserResources(anyString(), anyBoolean());
-		List<UserInstanceDTO> instances = Collections.singletonList(getUserInstance());
-		when(exploratoryService.getInstancesWithStatuses(anyString(), any(UserInstanceStatus.class),
-				any(UserInstanceStatus.class))).thenReturn(instances);
-		List<ResourceData> resourceList = new ArrayList<>();
-		resourceList.add(new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null));
-		when(userResourceService.convertToResourceData(any(List.class))).thenReturn(resourceList);
-		when(keyDAO.getEdgeInfoWhereStatusIn(anyString(), anyVararg())).thenReturn(Optional.empty());
-		doNothing().when(exploratoryDAO).updateStatusForExploratories(any(UserInstanceStatus.class), anyString(),
-				any(UserInstanceStatus.class));
-		doNothing().when(computationalDAO).updateStatusForComputationalResources(any(UserInstanceStatus.class),
-				anyString(), any(List.class), any(List.class), any(UserInstanceStatus.class));
-		ReuploadKeyDTO reuploadFile = mock(ReuploadKeyDTO.class);
-		when(requestBuilder.newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class)))
-				.thenReturn(reuploadFile);
-		String expectedUuid = "someUuid";
-		when(provisioningService.post(anyString(), anyString(), any(ReuploadKeyDTO.class), any()))
-				.thenReturn(expectedUuid);
-
-		String keyContent = "keyContent";
-		String actualUuid = reuploadKeyService.reuploadKey(userInfo, keyContent);
-		assertNotNull(actualUuid);
-		assertEquals(expectedUuid, actualUuid);
-		assertEquals(1, resourceList.size());
-
-		verify(userResourceService).updateReuploadKeyFlagForUserResources(USER, true);
-		verify(exploratoryService).getInstancesWithStatuses(USER, RUNNING, RUNNING);
-		verify(userResourceService).convertToResourceData(instances);
-		verify(keyDAO).getEdgeInfoWhereStatusIn(USER, RUNNING);
-		verify(exploratoryDAO).updateStatusForExploratories(REUPLOADING_KEY, USER, RUNNING);
-		verify(computationalDAO).updateStatusForComputationalResources(REUPLOADING_KEY, USER,
-				Arrays.asList(RUNNING, REUPLOADING_KEY), Arrays.asList(DataEngineType.SPARK_STANDALONE,
-						DataEngineType.CLOUD_SERVICE), RUNNING);
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(keyContent), any(List.class));
-		verify(provisioningService).post("/key/reupload", TOKEN, reuploadFile, String.class);
-		verifyNoMoreInteractions(userResourceService, exploratoryService, keyDAO, exploratoryDAO, computationalDAO,
-				requestBuilder, provisioningService);
-		verifyZeroInteractions(requestId);
-	}
-
 	@Test
 	public void updateResourceDataForEdgeWhenStatusCompleted() {
 		ResourceData resource = new ResourceData(ResourceType.EDGE, "someId", null, null);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-		doNothing().when(keyDAO).updateEdgeReuploadKey(anyString(), anyBoolean(), anyVararg());
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.COMPLETED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(keyDAO).updateEdgeStatus(USER, "running");
-		verify(keyDAO).updateEdgeReuploadKey(USER, false, UserInstanceStatus.values());
-		verifyNoMoreInteractions(keyDAO);
 		verifyZeroInteractions(exploratoryDAO, computationalDAO);
 	}
 
 	@Test
 	public void updateResourceDataForEdgeWhenStatusFailed() {
 		ResourceData resource = new ResourceData(ResourceType.EDGE, "someId", null, null);
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
 
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.FAILED);
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(keyDAO).updateEdgeStatus(USER, "running");
-		verifyNoMoreInteractions(keyDAO);
 		verifyZeroInteractions(exploratoryDAO, computationalDAO);
 	}
 
 	@Test
 	public void updateResourceDataForExploratoryWhenStatusCompleted() {
 		ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
-		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
+		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(), anyString(),
 				any(UserInstanceStatus.class))).thenReturn(mock(UpdateResult.class));
-		doNothing().when(exploratoryDAO).updateReuploadKeyForExploratory(anyString(), anyString(), anyBoolean());
+		doNothing().when(exploratoryDAO).updateReuploadKeyForExploratory(anyString(), anyString(), anyString(), anyBoolean());
 
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.COMPLETED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, RUNNING);
-		verify(exploratoryDAO).updateReuploadKeyForExploratory(USER, EXPLORATORY_NAME, false);
+		verify(exploratoryDAO).updateStatusForExploratory(USER, null, EXPLORATORY_NAME, RUNNING);
+		verify(exploratoryDAO).updateReuploadKeyForExploratory(USER, null, EXPLORATORY_NAME, false);
 		verifyNoMoreInteractions(exploratoryDAO);
-		verifyZeroInteractions(keyDAO, computationalDAO);
+		verifyZeroInteractions(computationalDAO);
 	}
 
 	@Test
 	public void updateResourceDataForExploratoryWhenStatusFailed() {
 		ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
-		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
+		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(), anyString(),
 				any(UserInstanceStatus.class))).thenReturn(mock(UpdateResult.class));
 
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.FAILED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, RUNNING);
+		verify(exploratoryDAO).updateStatusForExploratory(USER, null, EXPLORATORY_NAME, RUNNING);
 		verifyNoMoreInteractions(exploratoryDAO);
-		verifyZeroInteractions(keyDAO, computationalDAO);
+		verifyZeroInteractions(computationalDAO);
 	}
 
 	@Test
 	public void updateResourceDataForClusterWhenStatusCompleted() {
 		ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME, "compName");
 		doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
-				any(UserInstanceStatus.class));
+				anyString(), any(UserInstanceStatus.class));
 		doNothing().when(computationalDAO).updateReuploadKeyFlagForComputationalResource(anyString(), anyString(),
-				anyString(), anyBoolean());
+				anyString(), anyString(), anyBoolean());
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.COMPLETED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME, "compName", RUNNING);
-		verify(computationalDAO).updateReuploadKeyFlagForComputationalResource(USER, EXPLORATORY_NAME, "compName",
-				false);
+		verify(computationalDAO).updateStatusForComputationalResource(USER, null, EXPLORATORY_NAME, "compName", RUNNING);
+		verify(computationalDAO).updateReuploadKeyFlagForComputationalResource(USER, null, EXPLORATORY_NAME,
+				"compName", false);
 		verifyNoMoreInteractions(computationalDAO);
-		verifyZeroInteractions(exploratoryDAO, keyDAO);
+		verifyZeroInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void updateResourceDataForClusterWhenStatusFailed() {
 		ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME, "compName");
 		doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
-				any(UserInstanceStatus.class));
+				anyString(), any(UserInstanceStatus.class));
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.FAILED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME, "compName", RUNNING);
+		verify(computationalDAO).updateStatusForComputationalResource(USER, null, EXPLORATORY_NAME, "compName", RUNNING);
 		verifyNoMoreInteractions(computationalDAO);
-		verifyZeroInteractions(exploratoryDAO, keyDAO);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKeyActionForEdge() {
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), anyString());
-		ReuploadKeyDTO reuploadFile = mock(ReuploadKeyDTO.class);
-		when(requestBuilder.newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class)))
-				.thenReturn(reuploadFile);
-		String expectedUuid = "someUuid";
-		when(provisioningService.post(anyString(), anyString(), any(ReuploadKeyDTO.class), any(), any(Map.class)))
-				.thenReturn(expectedUuid);
-		when(requestId.put(anyString(), anyString())).thenReturn(expectedUuid);
-
-		ResourceData resource = new ResourceData(ResourceType.EDGE, "someId", null, null);
-		reuploadKeyService.reuploadKeyAction(userInfo, resource);
-
-		verify(keyDAO).updateEdgeStatus(USER, "reuploading key");
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(""), any(List.class));
-		verify(provisioningService).post("/key/reupload", TOKEN, reuploadFile, String.class,
-				Collections.singletonMap("is_primary_reuploading", false));
-		verify(requestId).put(USER, expectedUuid);
-		verifyNoMoreInteractions(keyDAO, requestBuilder, provisioningService, requestId);
-		verifyZeroInteractions(exploratoryDAO, computationalDAO);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKeyActionForEdgeWithException() {
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), eq("reuploading key"));
-		doThrow(new DlabException("Couldn't reupload key to edge"))
-				.when(requestBuilder).newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class));
-		doNothing().when(keyDAO).updateEdgeStatus(anyString(), eq("running"));
-
-		ResourceData resource = new ResourceData(ResourceType.EDGE, "someId", null, null);
-		try {
-			reuploadKeyService.reuploadKeyAction(userInfo, resource);
-		} catch (DlabException e) {
-			assertEquals("Couldn't reupload key to edge_node for user test:\tCouldn't reupload key to edge",
-					e.getMessage());
-		}
-
-		verify(keyDAO).updateEdgeStatus(USER, "reuploading key");
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(""), any(List.class));
-		verify(keyDAO).updateEdgeStatus(USER, "running");
-		verifyNoMoreInteractions(keyDAO, requestBuilder);
-		verifyZeroInteractions(exploratoryDAO, computationalDAO, provisioningService, requestId);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKeyActionForExploratory() {
-		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
-				any(UserInstanceStatus.class))).thenReturn(mock(UpdateResult.class));
-		ReuploadKeyDTO reuploadFile = mock(ReuploadKeyDTO.class);
-		when(requestBuilder.newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class)))
-				.thenReturn(reuploadFile);
-		String expectedUuid = "someUuid";
-		when(provisioningService.post(anyString(), anyString(), any(ReuploadKeyDTO.class), any(), any(Map.class)))
-				.thenReturn(expectedUuid);
-		when(requestId.put(anyString(), anyString())).thenReturn(expectedUuid);
-
-		ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
-		reuploadKeyService.reuploadKeyAction(userInfo, resource);
-
-		verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, REUPLOADING_KEY);
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(""), any(List.class));
-		verify(provisioningService).post("/key/reupload", TOKEN, reuploadFile, String.class,
-				Collections.singletonMap("is_primary_reuploading", false));
-		verify(requestId).put(USER, expectedUuid);
-		verifyNoMoreInteractions(exploratoryDAO, requestBuilder, provisioningService, requestId);
-		verifyZeroInteractions(keyDAO, computationalDAO);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKeyActionForExploratoryWithException() {
-		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
-				eq(REUPLOADING_KEY))).thenReturn(mock(UpdateResult.class));
-		doThrow(new DlabException("Couldn't reupload key to exploratory"))
-				.when(requestBuilder).newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class));
-		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
-				eq(RUNNING))).thenReturn(mock(UpdateResult.class));
-
-		ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
-		try {
-			reuploadKeyService.reuploadKeyAction(userInfo, resource);
-		} catch (DlabException e) {
-			assertEquals("Couldn't reupload key to exploratory explName for user test:\tCouldn't reupload key to " +
-					"exploratory", e.getMessage());
-		}
-
-		verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, REUPLOADING_KEY);
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(""), any(List.class));
-		verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, RUNNING);
-		verifyNoMoreInteractions(exploratoryDAO, requestBuilder);
-		verifyZeroInteractions(keyDAO, computationalDAO, provisioningService, requestId);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKeyActionForCluster() {
-		doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
-				any(UserInstanceStatus.class));
-		ReuploadKeyDTO reuploadFile = mock(ReuploadKeyDTO.class);
-		when(requestBuilder.newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class)))
-				.thenReturn(reuploadFile);
-		String expectedUuid = "someUuid";
-		when(provisioningService.post(anyString(), anyString(), any(ReuploadKeyDTO.class), any(), any(Map.class)))
-				.thenReturn(expectedUuid);
-		when(requestId.put(anyString(), anyString())).thenReturn(expectedUuid);
-
-		ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME,
-				"compName");
-		reuploadKeyService.reuploadKeyAction(userInfo, resource);
-
-		verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME,
-				"compName", REUPLOADING_KEY);
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(""), any(List.class));
-		verify(provisioningService).post("/key/reupload", TOKEN, reuploadFile, String.class,
-				Collections.singletonMap("is_primary_reuploading", false));
-		verify(requestId).put(USER, expectedUuid);
-		verifyNoMoreInteractions(computationalDAO, requestBuilder, provisioningService, requestId);
-		verifyZeroInteractions(keyDAO, exploratoryDAO);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void reuploadKeyActionForClusterWithException() {
-		doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
-				eq(REUPLOADING_KEY));
-		doThrow(new DlabException("Couldn't reupload key to cluster"))
-				.when(requestBuilder).newKeyReupload(any(UserInfo.class), anyString(), anyString(), any(List.class));
-		doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
-				eq(RUNNING));
-
-		ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME,
-				"compName");
-		try {
-			reuploadKeyService.reuploadKeyAction(userInfo, resource);
-		} catch (DlabException e) {
-			assertEquals("Couldn't reupload key to computational_resource compName affiliated with exploratory " +
-					"explName for user test:\tCouldn't reupload key to cluster", e.getMessage());
-		}
-
-		verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME,
-				"compName", REUPLOADING_KEY);
-		verify(requestBuilder).newKeyReupload(refEq(userInfo), anyString(), eq(""), any(List.class));
-		verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME,
-				"compName", RUNNING);
-		verifyNoMoreInteractions(computationalDAO, requestBuilder);
-		verifyZeroInteractions(keyDAO, exploratoryDAO, provisioningService, requestId);
+		verifyZeroInteractions(exploratoryDAO);
 	}
 
 	private UserInfo getUserInfo() {
 		return new UserInfo(USER, TOKEN);
 	}
 
-	private UserInstanceDTO getUserInstance() {
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME);
-	}
-
 	private ReuploadKeyStatusDTO getReuploadKeyStatusDTO(ResourceData resource, ReuploadKeyStatus status) {
 		return new ReuploadKeyStatusDTO().withReuploadKeyCallbackDto(
 				new ReuploadKeyCallbackDTO().withResource(resource)).withReuploadKeyStatus(status).withUser(USER);
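
The ReuploadKeyServiceImplTest hunks above delete the edge-node and KeyDAO paths entirely, leaving updateResourceData as the behavior under test; the surviving DAO calls gain a project argument, which is null here because the ReuploadKeyStatusDTO fixture carries no project. A compact sketch of the resulting verification, reusing the USER and EXPLORATORY_NAME values ("test" and "explName") visible in the assertions above; imports match those shown in this file's diff:

// Illustration only, not part of the patch. Mirrors the project-scoped DAO
// verification the updated tests perform; constant values are placeholders
// copied from the fixtures above.
import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
import static org.mockito.Mockito.verify;

import com.epam.dlab.backendapi.dao.ExploratoryDAO;

public class ReuploadVerificationSketch {
    static void verifyProjectScopedUpdate(ExploratoryDAO exploratoryDAO) {
        // Before: updateStatusForExploratory(user, exploratoryName, status)
        // After:  updateStatusForExploratory(user, project, exploratoryName, status)
        verify(exploratoryDAO).updateStatusForExploratory("test", null, "explName", RUNNING);
    }
}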
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
index 8a7d8ec..c025651 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
@@ -43,16 +43,40 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
 import java.time.temporal.ChronoUnit;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Optional;
 import java.util.stream.Collectors;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
 import static java.util.Collections.singletonList;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.mockito.Matchers.anyVararg;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class SchedulerJobServiceImplTest {
@@ -92,84 +116,83 @@
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratory() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString()))
 				.thenReturn(Optional.of(schedulerJobDTO));
 
 		SchedulerJobDTO actualSchedulerJobDto =
-				schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+				schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		assertNotNull(actualSchedulerJobDto);
 		assertEquals(schedulerJobDTO, actualSchedulerJobDto);
 
-		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO, schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratoryWhenNotebookNotExist() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString())).thenReturn(Optional.empty());
+		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString())).thenReturn(Optional.empty());
 		try {
-			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Scheduler job data not found for user test with exploratory explName", e.getMessage());
 		}
-		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchEmptySchedulerJobForUserAndExploratory() {
-		when(exploratoryDAO.isExploratoryExist(anyString(), anyString())).thenReturn(true);
-		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString()))
 				.thenReturn(Optional.empty());
 		try {
-			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Scheduler job data not found for user test with exploratory explName", e.getMessage());
 		}
-		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(exploratoryDAO, schedulerJobDAO);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
+		verifyNoMoreInteractions(schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchSchedulerJobForComputationalResource() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(Optional.of(schedulerJobDTO));
 
 		SchedulerJobDTO actualSchedulerJobDto = schedulerJobService
-				.fetchSchedulerJobForComputationalResource(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+				.fetchSchedulerJobForComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		assertNotNull(actualSchedulerJobDto);
 		assertEquals(schedulerJobDTO, actualSchedulerJobDto);
 
-		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(computationalDAO, schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchEmptySchedulerJobForComputationalResource() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(Optional.empty());
 		try {
-			schedulerJobService.fetchSchedulerJobForComputationalResource(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+			schedulerJobService.fetchSchedulerJobForComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Scheduler job data not found for user test with exploratory explName with " +
 					"computational resource compName", e.getMessage());
 		}
-		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(computationalDAO, schedulerJobDAO);
 	}
 
 	@Test
 	public void updateSchedulerDataForUserAndExploratory() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).updateSchedulerSyncFlag(USER, EXPLORATORY_NAME, false);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).updateSchedulerSyncFlag(USER, PROJECT, EXPLORATORY_NAME, false);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -177,13 +200,13 @@
 	@Test
 	public void updateSchedulerDataForUserAndExploratoryWhenMethodFetchExploratoryFieldsThrowsException() {
 		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 		try {
-			schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+			schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Exploratory for user with name not found", e.getMessage());
 		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -191,14 +214,14 @@
 	@Test
 	public void updateSchedulerDataForUserAndExploratoryWithInapproprietaryStatus() {
 		userInstance.withStatus("terminated");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		try {
-			schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+			schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 		} catch (ResourceInappropriateStateException e) {
 			assertEquals("Can not create/update scheduler for user instance with status: terminated",
 					e.getMessage());
 		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -208,21 +231,21 @@
 		schedulerJobDTO.setBeginDate(null);
 		schedulerJobDTO.setTimeZoneOffset(null);
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
 		assertNull(schedulerJobDTO.getBeginDate());
 		assertNull(schedulerJobDTO.getTimeZoneOffset());
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
 		assertEquals(LocalDate.now(), schedulerJobDTO.getBeginDate());
 		assertEquals(OffsetDateTime.now(ZoneId.systemDefault()).getOffset(), schedulerJobDTO.getTimeZoneOffset());
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).updateSchedulerSyncFlag(USER, EXPLORATORY_NAME, false);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).updateSchedulerSyncFlag(USER, PROJECT, EXPLORATORY_NAME, false);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -232,25 +255,24 @@
 	public void updateSchedulerDataForUserAndExploratoryWithSyncStartRequiredParam() {
 		userInstance.withStatus("running");
 		schedulerJobDTO.setSyncStartRequired(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), any(List.class),
-				anyString(), anyVararg())).thenReturn(singletonList(COMPUTATIONAL_NAME));
+		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), anyString(),
+				any(List.class), anyString(), anyVararg())).thenReturn(singletonList(COMPUTATIONAL_NAME));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER,
-				singletonList(DataEngineType.SPARK_STANDALONE),
-				EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER, PROJECT,
+				singletonList(DataEngineType.SPARK_STANDALONE), EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
 		schedulerJobDTO.setEndTime(null);
 		schedulerJobDTO.setStopDaysRepeat(Collections.emptyList());
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
-				COMPUTATIONAL_NAME, schedulerJobDTO);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+				EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -259,19 +281,18 @@
 	public void updateSchedulerDataForUserAndExploratoryWithSyncStartRequiredParamButAbsenceClusters() {
 		userInstance.withStatus("running");
 		schedulerJobDTO.setSyncStartRequired(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), any(List.class),
-				anyString(), anyVararg())).thenReturn(Collections.emptyList());
+		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), anyString(),
+				any(List.class), anyString(), anyVararg())).thenReturn(Collections.emptyList());
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER,
-				singletonList(DataEngineType.SPARK_STANDALONE),
-				EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER, PROJECT,
+				singletonList(DataEngineType.SPARK_STANDALONE), EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -279,30 +300,30 @@
 	@Test
 	public void updateSchedulerDataForComputationalResource() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
-		schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				schedulerJobDTO);
-
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
+		schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
 				COMPUTATIONAL_NAME, schedulerJobDTO);
+
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+				EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
 	@Test
 	public void updateSchedulerDataForComputationalResourceWhenSchedulerIsNull() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
 		final SchedulerJobDTO schedulerJobDTO = getSchedulerJobDTO(LocalDate.now(), LocalDate.now().plusDays(1),
 				Arrays.asList(DayOfWeek.values()), Arrays.asList(DayOfWeek.values()), false,
@@ -310,12 +331,12 @@
 				LocalTime.now().truncatedTo(ChronoUnit.MINUTES));
 		schedulerJobDTO.setStartDaysRepeat(null);
 		schedulerJobDTO.setStopDaysRepeat(null);
-		schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				schedulerJobDTO);
+		schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
+				COMPUTATIONAL_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(eq(USER), eq(EXPLORATORY_NAME),
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
 				eq(COMPUTATIONAL_NAME), refEq(schedulerJobDTO));
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
@@ -323,18 +344,17 @@
 	@Test
 	public void updateSchedulerDataForComputationalResourceWhenMethodFetchComputationalFieldsThrowsException() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		doThrow(new ResourceNotFoundException("Computational resource for user with name not found"))
-				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
+				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
 		try {
-			schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME,
-					COMPUTATIONAL_NAME, schedulerJobDTO);
+			schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Computational resource for user with name not found", e.getMessage());
 		}
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -342,18 +362,17 @@
 	public void updateSchedulerDataForComputationalResourceWithInapproprietaryClusterStatus() {
 		userInstance.setStatus("running");
 		userInstance.getResources().get(0).setStatus("terminated");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		try {
-			schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME,
-					COMPUTATIONAL_NAME, schedulerJobDTO);
+			schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		} catch (ResourceInappropriateStateException e) {
 			assertEquals("Can not create/update scheduler for user instance with status: terminated",
 					e.getMessage());
 		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -362,25 +381,25 @@
 		schedulerJobDTO.setBeginDate(null);
 		schedulerJobDTO.setTimeZoneOffset(null);
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
 		assertNull(schedulerJobDTO.getBeginDate());
 		assertNull(schedulerJobDTO.getTimeZoneOffset());
 
-		schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				schedulerJobDTO);
+		schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
+				COMPUTATIONAL_NAME, schedulerJobDTO);
 
 		assertEquals(LocalDate.now(), schedulerJobDTO.getBeginDate());
 		assertEquals(OffsetDateTime.now(ZoneId.systemDefault()).getOffset(), schedulerJobDTO.getTimeZoneOffset());
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
-				COMPUTATIONAL_NAME, schedulerJobDTO);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+				EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -499,7 +518,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO)
 				.getComputationalSchedulerDataWithOneOfStatus(RUNNING, DataEngineType.SPARK_STANDALONE, RUNNING);
-		verify(computationalService).stopSparkCluster(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
+		verify(computationalService).stopSparkCluster(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME),
 				eq(COMPUTATIONAL_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService);
 	}
@@ -597,7 +616,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerWithStatusAndClusterLastActivityLessThan(eq(RUNNING),
 				any(Date.class));
-		verify(exploratoryService).stop(refEq(getUserInfo()), eq(EXPLORATORY_NAME));
+		verify(exploratoryService).stop(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService);
 	}
 
@@ -711,7 +730,7 @@
 				)));
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
 		when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
-				any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+				anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
 				DataEngineType.SPARK_STANDALONE, true)));
 
 		schedulerJobService.startExploratoryByScheduler();
@@ -719,7 +738,7 @@
 		verify(securityService, times(2)).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
 		verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
-		verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+		verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
 		verify(computationalService).startSparkCluster(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
 				eq(COMPUTATIONAL_NAME), eq(PROJECT));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalService,
@@ -738,7 +757,7 @@
 				)));
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
 		when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
-				any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+				anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
 				DataEngineType.CLOUD_SERVICE, true)));
 
 		schedulerJobService.startExploratoryByScheduler();
@@ -746,7 +765,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
 		verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
-		verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+		verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalDAO);
 		verifyZeroInteractions(computationalService);
 	}
@@ -763,7 +782,7 @@
 				)));
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
 		when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
-				any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+				anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
 				DataEngineType.SPARK_STANDALONE, false)));
 
 		schedulerJobService.startExploratoryByScheduler();
@@ -771,7 +790,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
 		verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
-		verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+		verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalDAO);
 		verifyZeroInteractions(computationalService);
 	}
@@ -862,8 +881,8 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO)
 				.getComputationalSchedulerDataWithOneOfStatus(RUNNING, STOPPED, RUNNING);
-		verify(computationalService).terminateComputational(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
-				eq(COMPUTATIONAL_NAME));
+		verify(computationalService).terminateComputational(refEq(getUserInfo()), eq(PROJECT),
+				eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService);
 	}
 
@@ -955,7 +974,7 @@
 
 		verify(securityService).getUserInfoOffline(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithOneOfStatus(RUNNING, STOPPED);
-		verify(exploratoryService).terminate(refEq(getUserInfo()), eq(EXPLORATORY_NAME));
+		verify(exploratoryService).terminate(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService, exploratoryService);
 	}
 
@@ -1085,8 +1104,11 @@
 	private UserInstanceDTO getUserInstanceDTO() {
 		UserComputationalResource computationalResource = new UserComputationalResource();
 		computationalResource.setStatus("running");
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
-				.withResources(singletonList(computationalResource));
+		return new UserInstanceDTO()
+				.withUser(USER)
+				.withExploratoryName(EXPLORATORY_NAME)
+				.withResources(singletonList(computationalResource))
+				.withProject(PROJECT);
 	}
 
 	private AwsComputationalResource getComputationalResource(DataEngineType dataEngineType,
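
Every hunk in SchedulerJobServiceImplTest above follows the same mechanical pattern: a project argument is threaded between the user and the exploratory name, in both the stub (one extra anyString()) and the matching verification. The old two-argument when(...) calls would not even compile against the widened signatures, which is why each stub/verify pair changes together. Below is a minimal, self-contained sketch of that stub-then-verify shape; the nested DAO interface and the constants are illustrative stand-ins, not the real com.epam.dlab types.

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

public class ProjectScopedMockSketch {

	// Illustrative stand-in for the widened, project-scoped DAO method.
	interface ExploratoryDAO {
		String fetchExploratoryFields(String user, String project, String exploratoryName);
	}

	private static final String USER = "test";
	private static final String PROJECT = "project";
	private static final String EXPLORATORY_NAME = "explName";

	public static void main(String[] args) {
		ExploratoryDAO exploratoryDAO = mock(ExploratoryDAO.class);

		// Stub with one extra anyString() for the new project argument ...
		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
				.thenReturn("userInstance");

		exploratoryDAO.fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);

		// ... and verify with the project inserted between user and exploratory.
		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
		verifyNoMoreInteractions(exploratoryDAO);
	}
}
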
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java
index d494d5d..4fec7c6 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java
@@ -23,10 +23,13 @@
 import com.epam.dlab.backendapi.dao.UserGroupDao;
 import com.epam.dlab.backendapi.dao.UserRoleDao;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.resources.TestBase;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
+import io.dropwizard.auth.AuthenticationException;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -36,39 +39,50 @@
 import org.mockito.runners.MockitoJUnitRunner;
 
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anySet;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
-public class UserGroupServiceImplTest {
+public class UserGroupServiceImplTest extends TestBase {
 
-	private static final String ROLE_ID = "Role id";
-	private static final String USER = "test";
-	private static final String GROUP = "admin";
-	@Mock
-	private UserRoleDao userRoleDao;
-	@Mock
-	private UserGroupDao userGroupDao;
-	@Mock
-	private ProjectDAO projectDAO;
-	@InjectMocks
-	private UserGroupServiceImpl userGroupService;
+    private static final String ROLE_ID = "Role id";
+    private static final String USER = "test";
+    private static final String GROUP = "admin";
+    @Mock
+    private UserRoleDao userRoleDao;
+    @Mock
+    private UserGroupDao userGroupDao;
+    @Mock
+    private ProjectDAO projectDAO;
+    @InjectMocks
+    private UserGroupServiceImpl userGroupService;
 
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
+    @Rule
+    public ExpectedException expectedException = ExpectedException.none();
 
-	@Test
-	public void createGroup() {
-		when(userRoleDao.addGroupToRole(anySet(), anySet())).thenReturn(true);
+    @Before
+    public void setup() throws AuthenticationException {
+        authSetup();
+    }
 
-		userGroupService.createGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+    @Test
+    public void createGroup() {
+        when(userRoleDao.addGroupToRole(anySet(), anySet())).thenReturn(true);
 
-		verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-		verify(userGroupDao).addUsers(GROUP, Collections.singleton(USER));
-	}
+        userGroupService.createGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+
+        verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
+        verify(userGroupDao).addUsers(GROUP, Collections.singleton(USER));
+    }
 
 	@Test
 	public void createGroupWithNoUsers() {
@@ -77,7 +91,7 @@
 		userGroupService.createGroup(GROUP, Collections.singleton(ROLE_ID), Collections.emptySet());
 
 		verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-		verify(userGroupDao, never()).addUsers(anyString(), anySet());
+		verify(userGroupDao).addUsers(anyString(), anySet());
 	}
 
 	@Test
@@ -89,75 +103,11 @@
 	}
 
 	@Test
-	public void getAggregatedRoles() {
-		when(userRoleDao.aggregateRolesByGroup()).thenReturn(Collections.singletonList(getUserGroup()));
-
-		final List<UserGroupDto> aggregatedRolesByGroup = userGroupService.getAggregatedRolesByGroup();
-
-		assertEquals(1, aggregatedRolesByGroup.size());
-		assertEquals(GROUP, aggregatedRolesByGroup.get(0).getGroup());
-		assertTrue(aggregatedRolesByGroup.get(0).getRoles().isEmpty());
-
-		verify(userRoleDao).aggregateRolesByGroup();
-		verifyNoMoreInteractions(userRoleDao);
-	}
-
-	@Test
-	public void addUserToGroup() {
-		userGroupService.addUsersToGroup(GROUP, Collections.singleton(USER));
-
-		verify(userGroupDao).addUsers(eq(GROUP), refEq(Collections.singleton(USER)));
-		verifyNoMoreInteractions(userRoleDao, userGroupDao);
-	}
-
-	@Test
-	public void addRolesToGroup() {
-		when(userRoleDao.addGroupToRole(anySetOf(String.class), anySetOf(String.class))).thenReturn(true);
-
-		userGroupService.updateRolesForGroup(GROUP, Collections.singleton(ROLE_ID));
-
-		verify(userRoleDao).addGroupToRole(refEq(Collections.singleton(GROUP)), refEq(Collections.singleton(ROLE_ID)));
-		verify(userRoleDao).removeGroupWhenRoleNotIn(GROUP, Collections.singleton(ROLE_ID));
-		verifyNoMoreInteractions(userRoleDao);
-	}
-
-	@Test
-	public void removeUserFromGroup() {
-
-		userGroupService.removeUserFromGroup(GROUP, USER);
-
-		verify(userGroupDao).removeUser(GROUP, USER);
-		verifyNoMoreInteractions(userGroupDao);
-	}
-
-	@Test
-	public void removeGroupFromRole() {
-
-		when(userRoleDao.removeGroupFromRole(anySetOf(String.class), anySetOf(String.class))).thenReturn(true);
-
-		userGroupService.removeGroupFromRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-
-		verify(userRoleDao).removeGroupFromRole(refEq(Collections.singleton(GROUP)),
-				refEq(Collections.singleton(ROLE_ID)));
-		verifyNoMoreInteractions(userRoleDao);
-	}
-
-	@Test
-	public void removeGroupFromRoleWithException() {
-		when(userRoleDao.removeGroupFromRole(anySetOf(String.class), anySetOf(String.class))).thenReturn(false);
-
-		expectedException.expectMessage("Any of role : [" + ROLE_ID + "] were not found");
-		expectedException.expect(ResourceNotFoundException.class);
-
-		userGroupService.removeGroupFromRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-	}
-
-	@Test
 	public void removeGroup() {
 
 		when(userRoleDao.removeGroup(anyString())).thenReturn(true);
 		final ProjectDTO projectDTO = new ProjectDTO(
-				"name", Collections.emptySet(), "", "", null, Collections.emptyList());
+				"name", Collections.emptySet(), "", "", null, Collections.emptyList(), true);
 		when(projectDAO.getProjectsWithEndpointStatusNotIn(UserInstanceStatus.TERMINATED,
 				UserInstanceStatus.TERMINATING)).thenReturn(Collections.singletonList(projectDTO));
 		doNothing().when(userGroupDao).removeGroup(anyString());
@@ -175,7 +125,7 @@
 		when(userRoleDao.removeGroup(anyString())).thenReturn(true);
 		when(projectDAO.getProjectsWithEndpointStatusNotIn(UserInstanceStatus.TERMINATED,
 				UserInstanceStatus.TERMINATING)).thenReturn(Collections.singletonList( new ProjectDTO(
-				"name", Collections.singleton(GROUP), "", "", null, Collections.emptyList())));
+				"name", Collections.singleton(GROUP), "", "", null, Collections.emptyList(), true)));
 		doNothing().when(userGroupDao).removeGroup(anyString());
 
 		try {
@@ -193,7 +143,7 @@
 	public void removeGroupWhenGroupNotExist() {
 
 		final ProjectDTO projectDTO = new ProjectDTO(
-				"name", Collections.emptySet(), "", "", null, Collections.emptyList());
+				"name", Collections.emptySet(), "", "", null, Collections.emptyList(), true);
 		when(projectDAO.getProjectsWithEndpointStatusNotIn(UserInstanceStatus.TERMINATED,
 				UserInstanceStatus.TERMINATING)).thenReturn(Collections.singletonList(projectDTO));
 		when(userRoleDao.removeGroup(anyString())).thenReturn(false);
@@ -209,7 +159,7 @@
 	@Test
 	public void removeGroupWithException() {
 		final ProjectDTO projectDTO = new ProjectDTO(
-				"name", Collections.emptySet(), "", "", null, Collections.emptyList());
+				"name", Collections.emptySet(), "", "", null, Collections.emptyList(), true);
 		when(projectDAO.getProjectsWithEndpointStatusNotIn(UserInstanceStatus.TERMINATED,
 				UserInstanceStatus.TERMINATING)).thenReturn(Collections.singletonList(projectDTO));
 		when(userRoleDao.removeGroup(anyString())).thenThrow(new DlabException("Exception"));
@@ -220,17 +170,13 @@
 		userGroupService.removeGroup(GROUP);
 	}
 
-	@Test
-	public void updateGroup() {
-		userGroupService.updateGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+    private UserGroupDto getUserGroup() {
+        return new UserGroupDto(GROUP, Collections.emptyList(), Collections.emptySet());
+    }
 
-		verify(userGroupDao).updateUsers(GROUP, Collections.singleton(USER));
-		verify(userRoleDao).removeGroupWhenRoleNotIn(GROUP, Collections.singleton(ROLE_ID));
-		verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-		verifyNoMoreInteractions(userRoleDao, userGroupDao);
-	}
-
-	private UserGroupDto getUserGroup() {
-		return new UserGroupDto(GROUP, Collections.emptyList(), Collections.emptySet());
-	}
+    private List<ProjectDTO> getProjects() {
+        return Collections.singletonList(ProjectDTO.builder()
+                .groups(new HashSet<>(Collections.singletonList(GROUP)))
+                .build());
+    }
 }
\ No newline at end of file
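
One behavioral change worth flagging in the UserGroupServiceImplTest diff above: createGroupWithNoUsers now expects addUsers to be invoked (with an empty set) instead of asserting it never runs. A standalone illustration of the Mockito semantics involved follows, using a purely illustrative List mock: verify(mock, never()) asserts the absence of a call, while a bare verify(mock) expects exactly one invocation.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

import java.util.List;

public class NeverVersusOnceSketch {

	@SuppressWarnings("unchecked")
	public static void main(String[] args) {
		List<String> users = mock(List.class);

		verify(users, never()).add("test"); // passes: no interaction yet

		users.add("test");
		verify(users).add("test");          // passes: called exactly once
	}
}
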
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserResourceServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserResourceServiceImplTest.java
deleted file mode 100644
index 5343c2b..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserResourceServiceImplTest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package com.epam.dlab.backendapi.service.impl;
-
-
-import com.epam.dlab.backendapi.service.ComputationalService;
-import com.epam.dlab.backendapi.service.EdgeService;
-import com.epam.dlab.backendapi.service.ExploratoryService;
-import com.epam.dlab.dto.UserInstanceDTO;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.dto.computational.UserComputationalResource;
-import com.epam.dlab.model.ResourceData;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import static com.epam.dlab.dto.UserInstanceStatus.*;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
-
-@RunWith(MockitoJUnitRunner.class)
-public class UserResourceServiceImplTest {
-
-	private final String USER = "test";
-	private final String EXPLORATORY_NAME = "explName";
-
-	@Mock
-	private ExploratoryService exploratoryService;
-	@Mock
-	private ComputationalService computationalService;
-	@Mock
-	private EdgeService edgeService;
-
-	@InjectMocks
-	private UserResourceServiceImpl userResourceService;
-
-	@Test
-	public void convertToResourceData() {
-		List<UserInstanceDTO> userInstances = Collections.singletonList(getUserInstance());
-		List<ResourceData> expectedResourceList = Arrays.asList(
-				ResourceData.exploratoryResource("explId", EXPLORATORY_NAME),
-				ResourceData.computationalResource("compId", EXPLORATORY_NAME, "compName")
-		);
-		List<ResourceData> actualResourceList = userResourceService.convertToResourceData(userInstances);
-		assertEquals(2, actualResourceList.size());
-		assertEquals(expectedResourceList.get(0).toString(), actualResourceList.get(0).toString());
-		assertEquals(expectedResourceList.get(1).toString(), actualResourceList.get(1).toString());
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void updateReuploadKeyFlagForUserResources() {
-		doNothing().when(exploratoryService).updateExploratoriesReuploadKeyFlag(anyString(), anyBoolean(), anyVararg
-				());
-		doNothing().when(computationalService).updateComputationalsReuploadKeyFlag(anyString(), any(List.class),
-				any(List.class), anyBoolean(), anyVararg());
-		doNothing().when(edgeService).updateReuploadKeyFlag(anyString(), anyBoolean(), anyVararg());
-
-		userResourceService.updateReuploadKeyFlagForUserResources(USER, false);
-
-		verify(exploratoryService).updateExploratoriesReuploadKeyFlag(USER, false,
-				CREATING, CONFIGURING, STARTING, RUNNING, STOPPING, STOPPED);
-		verify(computationalService).updateComputationalsReuploadKeyFlag(USER,
-				Arrays.asList(STARTING, RUNNING, STOPPING, STOPPED),
-				Collections.singletonList(DataEngineType.SPARK_STANDALONE),
-				false,
-				CREATING, CONFIGURING, STARTING, RUNNING, STOPPING, STOPPED);
-		verify(computationalService).updateComputationalsReuploadKeyFlag(USER,
-				Collections.singletonList(RUNNING),
-				Collections.singletonList(DataEngineType.CLOUD_SERVICE),
-				false,
-				CREATING, CONFIGURING, STARTING, RUNNING);
-		verify(edgeService).updateReuploadKeyFlag(USER, false, STARTING, RUNNING, STOPPING, STOPPED);
-		verifyNoMoreInteractions(exploratoryService, computationalService, edgeService);
-	}
-
-	private UserInstanceDTO getUserInstance() {
-		UserComputationalResource computationalResource = new UserComputationalResource();
-		computationalResource.setComputationalId("compId");
-		computationalResource.setComputationalName("compName");
-		return new UserInstanceDTO()
-				.withUser(USER)
-				.withExploratoryId("explId")
-				.withExploratoryName(EXPLORATORY_NAME)
-				.withResources(Collections.singletonList(computationalResource));
-	}
-}
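
UserResourceServiceImplTest is removed wholesale along with the service it covered. For context on what its convertToResourceData test asserted (each exploratory flattens to one exploratory ResourceData plus one computational ResourceData per attached cluster), a hedged re-sketch follows: the ResourceData factory methods are taken from the deleted test body, while the DTO getters are assumed counterparts of the withX()/setX() mutators used above.

import com.epam.dlab.dto.UserInstanceDTO;
import com.epam.dlab.dto.computational.UserComputationalResource;
import com.epam.dlab.model.ResourceData;

import java.util.ArrayList;
import java.util.List;

final class ResourceDataFlatteningSketch {

	static List<ResourceData> convert(List<UserInstanceDTO> userInstances) {
		List<ResourceData> result = new ArrayList<>();
		for (UserInstanceDTO instance : userInstances) {
			// One entry for the notebook itself ...
			result.add(ResourceData.exploratoryResource(
					instance.getExploratoryId(), instance.getExploratoryName()));
			// ... and one per computational resource attached to it.
			for (UserComputationalResource compute : instance.getResources()) {
				result.add(ResourceData.computationalResource(
						compute.getComputationalId(),
						instance.getExploratoryName(),
						compute.getComputationalName()));
			}
		}
		return result;
	}
}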